diff --git a/share/man/man9/BUS_CHILD_LOCATION.9 b/share/man/man9/BUS_CHILD_LOCATION.9
new file mode 100644
index 000000000000..647b942b087a
--- /dev/null
+++ b/share/man/man9/BUS_CHILD_LOCATION.9
@@ -0,0 +1,61 @@
+.\"
+.\" Copyright (c) 2021 Netflix, Inc.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\"    notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\"    notice, this list of conditions and the following disclaimer in the
+.\"    documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR
+.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+.\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+.\" IN NO EVENT SHALL THE DEVELOPERS BE LIABLE FOR ANY DIRECT, INDIRECT,
+.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+.\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd April 22, 2021
+.Dt BUS_CHILD_LOCATION 9
+.Os
+.Sh NAME
+.Nm BUS_CHILD_LOCATION
+.Nd "obtain the location of a child on the bus"
+.Sh SYNOPSIS
+.In sys/param.h
+.In sys/bus.h
+.In sys/sbuf.h
+.Ft void
+.Fn BUS_CHILD_LOCATION "device_t dev" "device_t child" "struct sbuf *sb"
+.Sh DESCRIPTION
+The
+.Fn BUS_CHILD_LOCATION
+method returns the location of the
+.Fa child
+device.
+The location is expressed as a space-separated list of key=value pairs.
+Keys may contain only alphanumeric characters, underscores ('_'), and hyphens ('-').
+Values can contain any non-whitespace characters.
+Values containing whitespace can be quoted with double quotes ('"').
+Double quotes and backslashes in quoted values can be escaped with backslashes ('\e').
+.Pp
+The location is defined as a series of characteristics of the
+.Fa child
+device that can be used to locate that device independent of which drivers are
+attached.
+Typically, these are slot numbers, bus addresses, or similar topology
+information.
+Where possible, buses are encouraged to provide locations that are stable from
+boot to boot and when other devices are added or removed.
+A location is not dependent on the kind of device at that location.
+.Sh SEE ALSO
+.Xr bus 9 ,
+.Xr device 9
diff --git a/share/man/man9/BUS_CHILD_PNPINFO.9 b/share/man/man9/BUS_CHILD_PNPINFO.9
new file mode 100644
index 000000000000..eac9072d0514
--- /dev/null
+++ b/share/man/man9/BUS_CHILD_PNPINFO.9
@@ -0,0 +1,65 @@
+.\"
+.\" Copyright (c) 2021 Netflix, Inc.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\"    notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\"    notice, this list of conditions and the following disclaimer in the
+.\"    documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR
+.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+.\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+.\" IN NO EVENT SHALL THE DEVELOPERS BE LIABLE FOR ANY DIRECT, INDIRECT,
+.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+.\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd April 22, 2021
+.Dt BUS_CHILD_PNPINFO 9
+.Os
+.Sh NAME
+.Nm BUS_CHILD_PNPINFO
+.Nd "obtain the plug and play information from a device"
+.Sh SYNOPSIS
+.In sys/param.h
+.In sys/bus.h
+.In sys/sbuf.h
+.Ft void
+.Fn BUS_CHILD_PNPINFO "device_t dev" "device_t child" "struct sbuf *sb"
+.Sh DESCRIPTION
+The
+.Fn BUS_CHILD_PNPINFO
+method returns identifying information about the
+.Fa child
+device.
+Some buses call this information the plug and play (pnp) details.
+The information is expressed as a space-separated list of key=value pairs.
+Keys may contain only alphanumeric characters, underscores ('_'), and hyphens ('-').
+Values can contain any non-whitespace characters.
+Values containing whitespace can be quoted with double quotes ('"').
+Double quotes and backslashes in quoted values can be escaped with backslashes ('\e').
+.Pp
+The pnpinfo is defined as a series of characteristics of the
+.Fa child
+device that are independent of which drivers are attached, but
+are used to allow drivers to claim a device.
+Typically, plug and play information encodes who made the device, its model
+number, and some generic details about the device.
+By convention, only the generic information that drivers on the bus use to
+decide whether to accept the device is reported.
+Other configuration information that is needed to operate the device, but
+that does not broadly distinguish it from other devices (such as the cache
+burst size), is not reported.
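+.Sh EXAMPLES
+The following is a minimal sketch of how a hypothetical
+.Dq foo
+bus driver might implement this method.
+The
+.Fn foo_child_pnpinfo
+function and its instance variables are illustrative only;
+each bus defines its own keys:
+.Bd -literal
+static void
+foo_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb)
+{
+	/* Hypothetical per-child instance variables; layout is bus-specific. */
+	struct foo_ivars *ivars = device_get_ivars(child);
+
+	/* Append key=value pairs following the rules described above. */
+	sbuf_printf(sb, "vendor=0x%04x device=0x%04x",
+	    ivars->vendor, ivars->device);
+}
+.Ed
+.Pp
+An implementation of
+.Xr BUS_CHILD_LOCATION 9
+has the same shape, reporting location keys (for example, slot=3) instead.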
+.Sh SEE ALSO +.Xr bus 9 , +.Xr device 9 diff --git a/share/man/man9/Makefile b/share/man/man9/Makefile index 153870c34af9..19da0a012912 100644 --- a/share/man/man9/Makefile +++ b/share/man/man9/Makefile @@ -1,2411 +1,2413 @@ # $FreeBSD$ .include MAN= accept_filter.9 \ accf_data.9 \ accf_dns.9 \ accf_http.9 \ acl.9 \ alq.9 \ altq.9 \ atomic.9 \ backlight.9 \ bhnd.9 \ bhnd_erom.9 \ bios.9 \ bitset.9 \ boot.9 \ bpf.9 \ buf.9 \ buf_ring.9 \ BUF_ISLOCKED.9 \ BUF_LOCK.9 \ BUF_LOCKFREE.9 \ BUF_LOCKINIT.9 \ BUF_RECURSED.9 \ BUF_TIMELOCK.9 \ BUF_UNLOCK.9 \ bus_activate_resource.9 \ BUS_ADD_CHILD.9 \ bus_adjust_resource.9 \ bus_alloc_resource.9 \ BUS_BIND_INTR.9 \ bus_child_present.9 \ BUS_CHILD_DELETED.9 \ BUS_CHILD_DETACHED.9 \ + BUS_CHILD_LOCATION.9 \ + BUS_CHILD_PNPINFO.9 \ BUS_CONFIG_INTR.9 \ bus_delayed_attach_children.9 \ BUS_DESCRIBE_INTR.9 \ bus_dma.9 \ bus_generic_attach.9 \ bus_generic_detach.9 \ bus_generic_new_pass.9 \ bus_generic_print_child.9 \ bus_generic_read_ivar.9 \ bus_generic_shutdown.9 \ BUS_GET_CPUS.9 \ bus_get_resource.9 \ bus_map_resource.9 \ BUS_NEW_PASS.9 \ BUS_PRINT_CHILD.9 \ BUS_READ_IVAR.9 \ BUS_RESCAN.9 \ bus_release_resource.9 \ bus_set_pass.9 \ bus_set_resource.9 \ BUS_SETUP_INTR.9 \ bus_space.9 \ byteorder.9 \ callout.9 \ casuword.9 \ cd.9 \ cnv.9 \ condvar.9 \ config_intrhook.9 \ contigmalloc.9 \ copy.9 \ counter.9 \ cpuset.9 \ cr_cansee.9 \ critical_enter.9 \ cr_seeothergids.9 \ cr_seeotheruids.9 \ crypto.9 \ crypto_buffer.9 \ crypto_driver.9 \ crypto_request.9 \ crypto_session.9 \ CTASSERT.9 \ DB_COMMAND.9 \ DECLARE_GEOM_CLASS.9 \ DECLARE_MODULE.9 \ DEFINE_IFUNC.9 \ DELAY.9 \ devclass.9 \ devclass_find.9 \ devclass_get_device.9 \ devclass_get_devices.9 \ devclass_get_drivers.9 \ devclass_get_maxunit.9 \ devclass_get_name.9 \ devclass_get_softc.9 \ dev_clone.9 \ devfs_set_cdevpriv.9 \ device.9 \ device_add_child.9 \ DEVICE_ATTACH.9 \ device_delete_child.9 \ device_delete_children.9 \ DEVICE_DETACH.9 \ device_enable.9 \ device_find_child.9 \ device_get_children.9 \ device_get_devclass.9 \ device_get_driver.9 \ device_get_ivars.9 \ device_get_name.9 \ device_get_parent.9 \ device_get_softc.9 \ device_get_state.9 \ device_get_sysctl.9 \ device_get_unit.9 \ DEVICE_IDENTIFY.9 \ device_printf.9 \ DEVICE_PROBE.9 \ device_probe_and_attach.9 \ device_quiet.9 \ device_set_desc.9 \ device_set_driver.9 \ device_set_flags.9 \ DEVICE_SHUTDOWN.9 \ DEV_MODULE.9 \ dev_refthread.9 \ devctl_notify.9 \ devctl_process_running.9 \ devctl_safe_quote_sb.9 \ devstat.9 \ devtoname.9 \ disk.9 \ dnv.9 \ domain.9 \ domainset.9 \ dpcpu.9 \ drbr.9 \ driver.9 \ DRIVER_MODULE.9 \ efirt.9 \ epoch.9 \ ether_gen_addr.9 \ EVENTHANDLER.9 \ eventtimers.9 \ extattr.9 \ fail.9 \ fdt_pinctrl.9 \ fetch.9 \ firmware.9 \ fpu_kern.9 \ g_access.9 \ g_attach.9 \ g_bio.9 \ g_consumer.9 \ g_data.9 \ get_cyclecount.9 \ getenv.9 \ getnewvnode.9 \ g_event.9 \ g_geom.9 \ g_provider.9 \ g_provider_by_name.9 \ groupmember.9 \ g_wither_geom.9 \ hardclock.9 \ hash.9 \ hashinit.9 \ hexdump.9 \ hhook.9 \ hz.9 \ ieee80211.9 \ ieee80211_amrr.9 \ ieee80211_beacon.9 \ ieee80211_bmiss.9 \ ieee80211_crypto.9 \ ieee80211_ddb.9 \ ieee80211_input.9 \ ieee80211_node.9 \ ieee80211_output.9 \ ieee80211_proto.9 \ ieee80211_radiotap.9 \ ieee80211_regdomain.9 \ ieee80211_scan.9 \ ieee80211_vap.9 \ iflib.9 \ iflibdd.9 \ iflibdi.9 \ iflibtxrx.9 \ ifnet.9 \ inittodr.9 \ insmntque.9 \ intro.9 \ ithread.9 \ kasan.9 \ KASSERT.9 \ kern_testfrwk.9 \ kernacc.9 \ kernel_mount.9 \ khelp.9 \ kobj.9 \ kproc.9 \ kqueue.9 \ kthread.9 \ ktr.9 \ lock.9 
\ locking.9 \ LOCK_PROFILING.9 \ mac.9 \ make_dev.9 \ malloc.9 \ mbchain.9 \ mbuf.9 \ mbuf_tags.9 \ MD5.9 \ mdchain.9 \ memcchr.9 \ memguard.9 \ microseq.9 \ microtime.9 \ microuptime.9 \ mi_switch.9 \ mod_cc.9 \ module.9 \ MODULE_DEPEND.9 \ MODULE_PNP_INFO.9 \ MODULE_VERSION.9 \ mtx_pool.9 \ mutex.9 \ namei.9 \ netisr.9 \ nv.9 \ OF_child.9 \ OF_device_from_xref.9 \ OF_finddevice.9 \ OF_getprop.9 \ OF_node_from_xref.9 \ OF_package_to_path.9 \ ofw_bus_is_compatible.9 \ ofw_bus_status_okay.9 \ osd.9 \ owll.9 \ own.9 \ panic.9 \ PCBGROUP.9 \ p_candebug.9 \ p_cansee.9 \ pci.9 \ PCI_IOV_ADD_VF.9 \ PCI_IOV_INIT.9 \ pci_iov_schema.9 \ PCI_IOV_UNINIT.9 \ pfil.9 \ pfind.9 \ pget.9 \ pgfind.9 \ PHOLD.9 \ physio.9 \ pmap.9 \ pmap_activate.9 \ pmap_clear_modify.9 \ pmap_copy.9 \ pmap_enter.9 \ pmap_extract.9 \ pmap_growkernel.9 \ pmap_init.9 \ pmap_is_modified.9 \ pmap_is_prefaultable.9 \ pmap_map.9 \ pmap_mincore.9 \ pmap_object_init_pt.9 \ pmap_page_exists_quick.9 \ pmap_page_init.9 \ pmap_pinit.9 \ pmap_protect.9 \ pmap_qenter.9 \ pmap_quick_enter_page.9 \ pmap_release.9 \ pmap_remove.9 \ pmap_resident_count.9 \ pmap_unwire.9 \ pmap_zero_page.9 \ printf.9 \ prison_check.9 \ priv.9 \ prng.9 \ proc_rwmem.9 \ pseudofs.9 \ psignal.9 \ pwmbus.9 \ random.9 \ random_harvest.9 \ ratecheck.9 \ redzone.9 \ refcount.9 \ regulator.9 \ resettodr.9 \ resource_int_value.9 \ rijndael.9 \ rman.9 \ rmlock.9 \ rtalloc.9 \ rtentry.9 \ runqueue.9 \ rwlock.9 \ sbuf.9 \ scheduler.9 \ SDT.9 \ securelevel_gt.9 \ selrecord.9 \ sema.9 \ seqc.9 \ sf_buf.9 \ sglist.9 \ shm_map.9 \ signal.9 \ sleep.9 \ sleepqueue.9 \ socket.9 \ stack.9 \ store.9 \ style.9 \ style.lua.9 \ ${_superio.9} \ swi.9 \ sx.9 \ syscall_helper_register.9 \ SYSCALL_MODULE.9 \ sysctl.9 \ sysctl_add_oid.9 \ sysctl_ctx_init.9 \ SYSINIT.9 \ taskqueue.9 \ tcp_functions.9 \ thread_exit.9 \ time.9 \ tvtohz.9 \ ucred.9 \ uidinfo.9 \ uio.9 \ unr.9 \ vaccess.9 \ vaccess_acl_nfs4.9 \ vaccess_acl_posix1e.9 \ vcount.9 \ vflush.9 \ VFS.9 \ vfs_busy.9 \ VFS_CHECKEXP.9 \ vfsconf.9 \ VFS_FHTOVP.9 \ vfs_getnewfsid.9 \ vfs_getopt.9 \ vfs_getvfs.9 \ VFS_MOUNT.9 \ vfs_mountedfrom.9 \ VFS_QUOTACTL.9 \ VFS_ROOT.9 \ vfs_rootmountalloc.9 \ VFS_SET.9 \ VFS_STATFS.9 \ vfs_suser.9 \ VFS_SYNC.9 \ vfs_timestamp.9 \ vfs_unbusy.9 \ VFS_UNMOUNT.9 \ vfs_unmountall.9 \ VFS_VGET.9 \ vget.9 \ vgone.9 \ vhold.9 \ vinvalbuf.9 \ vm_fault_prefault.9 \ vm_map.9 \ vm_map_check_protection.9 \ vm_map_delete.9 \ vm_map_entry_resize_free.9 \ vm_map_find.9 \ vm_map_findspace.9 \ vm_map_inherit.9 \ vm_map_init.9 \ vm_map_insert.9 \ vm_map_lock.9 \ vm_map_lookup.9 \ vm_map_madvise.9 \ vm_map_max.9 \ vm_map_protect.9 \ vm_map_remove.9 \ vm_map_simplify_entry.9 \ vm_map_stack.9 \ vm_map_submap.9 \ vm_map_sync.9 \ vm_map_wire.9 \ vm_page_alloc.9 \ vm_page_bits.9 \ vm_page_busy.9 \ vm_page_deactivate.9 \ vm_page_dontneed.9 \ vm_page_aflag.9 \ vm_page_free.9 \ vm_page_grab.9 \ vm_page_insert.9 \ vm_page_lookup.9 \ vm_page_rename.9 \ vm_page_wire.9 \ vm_set_page_size.9 \ vmem.9 \ vn_fullpath.9 \ vn_isdisk.9 \ vnet.9 \ vnode.9 \ vnode_pager_setsize.9 \ VOP_ACCESS.9 \ VOP_ACLCHECK.9 \ VOP_ADVISE.9 \ VOP_ADVLOCK.9 \ VOP_ALLOCATE.9 \ VOP_ATTRIB.9 \ VOP_BMAP.9 \ VOP_BWRITE.9 \ VOP_COPY_FILE_RANGE.9 \ VOP_CREATE.9 \ VOP_FSYNC.9 \ VOP_GETACL.9 \ VOP_GETEXTATTR.9 \ VOP_GETPAGES.9 \ VOP_INACTIVE.9 \ VOP_IOCTL.9 \ VOP_LINK.9 \ VOP_LISTEXTATTR.9 \ VOP_LOCK.9 \ VOP_LOOKUP.9 \ VOP_OPENCLOSE.9 \ VOP_PATHCONF.9 \ VOP_PRINT.9 \ VOP_RDWR.9 \ VOP_READ_PGCACHE.9 \ VOP_READDIR.9 \ VOP_READLINK.9 \ VOP_REALLOCBLKS.9 \ VOP_REMOVE.9 
\ VOP_RENAME.9 \ VOP_REVOKE.9 \ VOP_SETACL.9 \ VOP_SETEXTATTR.9 \ VOP_STRATEGY.9 \ VOP_VPTOCNP.9 \ VOP_VPTOFH.9 \ vref.9 \ vrefcnt.9 \ vrele.9 \ vslock.9 \ watchdog.9 \ zero_region.9 \ zone.9 MLINKS= accept_filter.9 accept_filt_add.9 \ accept_filter.9 accept_filt_del.9 \ accept_filter.9 accept_filt_generic_mod_event.9 \ accept_filter.9 accept_filt_get.9 MLINKS+=alq.9 ALQ.9 \ alq.9 alq_close.9 \ alq.9 alq_flush.9 \ alq.9 alq_get.9 \ alq.9 alq_getn.9 \ alq.9 alq_open.9 \ alq.9 alq_open_flags.9 \ alq.9 alq_post.9 \ alq.9 alq_post_flags.9 \ alq.9 alq_write.9 \ alq.9 alq_writen.9 MLINKS+=altq.9 ALTQ.9 MLINKS+=atomic.9 atomic_add.9 \ atomic.9 atomic_clear.9 \ atomic.9 atomic_cmpset.9 \ atomic.9 atomic_fcmpset.9 \ atomic.9 atomic_fetchadd.9 \ atomic.9 atomic_load.9 \ atomic.9 atomic_readandclear.9 \ atomic.9 atomic_set.9 \ atomic.9 atomic_store.9 \ atomic.9 atomic_subtract.9 \ atomic.9 atomic_swap.9 \ atomic.9 atomic_testandclear.9 \ atomic.9 atomic_testandset.9 \ atomic.9 atomic_thread_fence.9 MLINKS+=bhnd.9 BHND_MATCH_BOARD_TYPE.9 \ bhnd.9 BHND_MATCH_BOARD_VENDOR.9 \ bhnd.9 BHND_MATCH_CHIP_ID.9 \ bhnd.9 BHND_MATCH_CHIP_PKG.9 \ bhnd.9 BHND_MATCH_CHIP_REV.9 \ bhnd.9 BHND_MATCH_CORE_ID.9 \ bhnd.9 BHND_MATCH_CORE_VENDOR.9 \ bhnd.9 bhnd_activate_resource.9 \ bhnd.9 bhnd_alloc_pmu.9 \ bhnd.9 bhnd_alloc_resource.9 \ bhnd.9 bhnd_alloc_resource_any.9 \ bhnd.9 bhnd_alloc_resources.9 \ bhnd.9 bhnd_board_matches.9 \ bhnd.9 bhnd_bus_match_child.9 \ bhnd.9 bhnd_bus_read_1.9 \ bhnd.9 bhnd_bus_read_2.9 \ bhnd.9 bhnd_bus_read_4.9 \ bhnd.9 bhnd_bus_read_stream_1.9 \ bhnd.9 bhnd_bus_read_stream_2.9 \ bhnd.9 bhnd_bus_read_stream_4.9 \ bhnd.9 bhnd_bus_write_1.9 \ bhnd.9 bhnd_bus_write_2.9 \ bhnd.9 bhnd_bus_write_4.9 \ bhnd.9 bhnd_bus_write_stream_1.9 \ bhnd.9 bhnd_bus_write_stream_2.9 \ bhnd.9 bhnd_bus_write_stream_4.9 \ bhnd.9 bhnd_chip_matches.9 \ bhnd.9 bhnd_core_class.9 \ bhnd.9 bhnd_core_get_match_desc.9 \ bhnd.9 bhnd_core_matches.9 \ bhnd.9 bhnd_core_name.9 \ bhnd.9 bhnd_cores_equal.9 \ bhnd.9 bhnd_deactivate_resource.9 \ bhnd.9 bhnd_decode_port_rid.9 \ bhnd.9 bhnd_deregister_provider.9 \ bhnd.9 bhnd_device_lookup.9 \ bhnd.9 bhnd_device_matches.9 \ bhnd.9 bhnd_device_quirks.9 \ bhnd.9 bhnd_driver_get_erom_class.9 \ bhnd.9 bhnd_enable_clocks.9 \ bhnd.9 bhnd_find_core_class.9 \ bhnd.9 bhnd_find_core_name.9 \ bhnd.9 bhnd_format_chip_id.9 \ bhnd.9 bhnd_get_attach_type.9 \ bhnd.9 bhnd_get_chipid.9 \ bhnd.9 bhnd_get_class.9 \ bhnd.9 bhnd_get_clock_freq.9 \ bhnd.9 bhnd_get_clock_latency.9 \ bhnd.9 bhnd_get_core_index.9 \ bhnd.9 bhnd_get_core_info.9 \ bhnd.9 bhnd_get_core_unit.9 \ bhnd.9 bhnd_get_device.9 \ bhnd.9 bhnd_get_device_name.9 \ bhnd.9 bhnd_get_dma_translation.9 \ bhnd.9 bhnd_get_hwrev.9 \ bhnd.9 bhnd_get_intr_count.9 \ bhnd.9 bhnd_get_intr_ivec.9 \ bhnd.9 bhnd_get_port_count.9 \ bhnd.9 bhnd_get_port_rid.9 \ bhnd.9 bhnd_get_region_addr.9 \ bhnd.9 bhnd_get_region_count.9 \ bhnd.9 bhnd_get_vendor.9 \ bhnd.9 bhnd_get_vendor_name.9 \ bhnd.9 bhnd_hwrev_matches.9 \ bhnd.9 bhnd_is_hw_suspended.9 \ bhnd.9 bhnd_is_region_valid.9 \ bhnd.9 bhnd_map_intr.9 \ bhnd.9 bhnd_match_core.9 \ bhnd.9 bhnd_nvram_getvar.9 \ bhnd.9 bhnd_nvram_getvar_array.9 \ bhnd.9 bhnd_nvram_getvar_int.9 \ bhnd.9 bhnd_nvram_getvar_int16.9 \ bhnd.9 bhnd_nvram_getvar_int32.9 \ bhnd.9 bhnd_nvram_getvar_int8.9 \ bhnd.9 bhnd_nvram_getvar_str.9 \ bhnd.9 bhnd_nvram_getvar_uint.9 \ bhnd.9 bhnd_nvram_getvar_uint16.9 \ bhnd.9 bhnd_nvram_getvar_uint32.9 \ bhnd.9 bhnd_nvram_getvar_uint8.9 \ bhnd.9 bhnd_nvram_string_array_next.9 \ bhnd.9 
bhnd_read_board_info.9 \ bhnd.9 bhnd_read_config.9 \ bhnd.9 bhnd_read_ioctl.9 \ bhnd.9 bhnd_read_iost.9 \ bhnd.9 bhnd_register_provider.9 \ bhnd.9 bhnd_release_ext_rsrc.9 \ bhnd.9 bhnd_release_pmu.9 \ bhnd.9 bhnd_release_provider.9 \ bhnd.9 bhnd_release_resource.9 \ bhnd.9 bhnd_release_resources.9 \ bhnd.9 bhnd_request_clock.9 \ bhnd.9 bhnd_request_ext_rsrc.9 \ bhnd.9 bhnd_reset_hw.9 \ bhnd.9 bhnd_retain_provider.9 \ bhnd.9 bhnd_set_custom_core_desc.9 \ bhnd.9 bhnd_set_default_core_desc.9 \ bhnd.9 bhnd_suspend_hw.9 \ bhnd.9 bhnd_unmap_intr.9 \ bhnd.9 bhnd_vendor_name.9 \ bhnd.9 bhnd_write_config.9 \ bhnd.9 bhnd_write_ioctl.9 MLINKS+=bhnd_erom.9 bhnd_erom_alloc.9 \ bhnd_erom.9 bhnd_erom_dump.9 \ bhnd_erom.9 bhnd_erom_fini_static.9 \ bhnd_erom.9 bhnd_erom_free.9 \ bhnd_erom.9 bhnd_erom_free_core_table.9 \ bhnd_erom.9 bhnd_erom_get_core_table.9 \ bhnd_erom.9 bhnd_erom_init_static.9 \ bhnd_erom.9 bhnd_erom_io.9 \ bhnd_erom.9 bhnd_erom_io_fini.9 \ bhnd_erom.9 bhnd_erom_io_map.9 \ bhnd_erom.9 bhnd_erom_io_read.9 \ bhnd_erom.9 bhnd_erom_iobus_init.9 \ bhnd_erom.9 bhnd_erom_iores_new.9 \ bhnd_erom.9 bhnd_erom_lookup_core.9 \ bhnd_erom.9 bhnd_erom_lookup_core_addr.9 \ bhnd_erom.9 bhnd_erom_probe.9 \ bhnd_erom.9 bhnd_erom_probe_driver_classes.9 MLINKS+=bitset.9 BITSET_DEFINE.9 \ bitset.9 BITSET_T_INITIALIZER.9 \ bitset.9 BITSET_FSET.9 \ bitset.9 BIT_CLR.9 \ bitset.9 BIT_COPY.9 \ bitset.9 BIT_ISSET.9 \ bitset.9 BIT_SET.9 \ bitset.9 BIT_ZERO.9 \ bitset.9 BIT_FILL.9 \ bitset.9 BIT_SETOF.9 \ bitset.9 BIT_EMPTY.9 \ bitset.9 BIT_ISFULLSET.9 \ bitset.9 BIT_FFS.9 \ bitset.9 BIT_FFS_AT.9 \ bitset.9 BIT_FLS.9 \ bitset.9 BIT_COUNT.9 \ bitset.9 BIT_SUBSET.9 \ bitset.9 BIT_OVERLAP.9 \ bitset.9 BIT_CMP.9 \ bitset.9 BIT_OR.9 \ bitset.9 BIT_OR2.9 \ bitset.9 BIT_AND.9 \ bitset.9 BIT_AND2.9 \ bitset.9 BIT_ANDNOT.9 \ bitset.9 BIT_ANDNOT2.9 \ bitset.9 BIT_XOR.9 \ bitset.9 BIT_XOR2.9 \ bitset.9 BIT_CLR_ATOMIC.9 \ bitset.9 BIT_SET_ATOMIC.9 \ bitset.9 BIT_SET_ATOMIC_ACQ.9 \ bitset.9 BIT_TEST_SET_ATOMIC.9 \ bitset.9 BIT_TEST_CLR_ATOMIC.9 \ bitset.9 BIT_AND_ATOMIC.9 \ bitset.9 BIT_OR_ATOMIC.9 \ bitset.9 BIT_COPY_STORE_REL.9 MLINKS+=bpf.9 bpfattach.9 \ bpf.9 bpfattach2.9 \ bpf.9 bpfdetach.9 \ bpf.9 bpf_filter.9 \ bpf.9 bpf_mtap.9 \ bpf.9 bpf_mtap2.9 \ bpf.9 bpf_tap.9 \ bpf.9 bpf_validate.9 MLINKS+=buf.9 bp.9 MLINKS+=buf_ring.9 buf_ring_alloc.9 \ buf_ring.9 buf_ring_free.9 \ buf_ring.9 buf_ring_enqueue.9 \ buf_ring.9 buf_ring_enqueue_bytes.9 \ buf_ring.9 buf_ring_dequeue_mc.9 \ buf_ring.9 buf_ring_dequeue_sc.9 \ buf_ring.9 buf_ring_count.9 \ buf_ring.9 buf_ring_empty.9 \ buf_ring.9 buf_ring_full.9 \ buf_ring.9 buf_ring_peek.9 MLINKS+=bus_activate_resource.9 bus_deactivate_resource.9 MLINKS+=bus_alloc_resource.9 bus_alloc_resource_any.9 MLINKS+=BUS_BIND_INTR.9 bus_bind_intr.9 MLINKS+=BUS_DESCRIBE_INTR.9 bus_describe_intr.9 MLINKS+=bus_dma.9 busdma.9 \ bus_dma.9 bus_dmamap_create.9 \ bus_dma.9 bus_dmamap_destroy.9 \ bus_dma.9 bus_dmamap_load.9 \ bus_dma.9 bus_dmamap_load_bio.9 \ bus_dma.9 bus_dmamap_load_ccb.9 \ bus_dma.9 bus_dmamap_load_crp.9 \ bus_dma.9 bus_dmamap_load_crp_buffer.9 \ bus_dma.9 bus_dmamap_load_mbuf.9 \ bus_dma.9 bus_dmamap_load_mbuf_sg.9 \ bus_dma.9 bus_dmamap_load_uio.9 \ bus_dma.9 bus_dmamap_sync.9 \ bus_dma.9 bus_dmamap_unload.9 \ bus_dma.9 bus_dmamem_alloc.9 \ bus_dma.9 bus_dmamem_free.9 \ bus_dma.9 bus_dma_tag_create.9 \ bus_dma.9 bus_dma_tag_destroy.9 MLINKS+=bus_generic_read_ivar.9 bus_generic_write_ivar.9 MLINKS+=BUS_GET_CPUS.9 bus_get_cpus.9 MLINKS+=bus_map_resource.9 bus_unmap_resource.9 \ 
bus_map_resource.9 resource_init_map_request.9 MLINKS+=BUS_READ_IVAR.9 BUS_WRITE_IVAR.9 MLINKS+=BUS_SETUP_INTR.9 bus_setup_intr.9 \ BUS_SETUP_INTR.9 BUS_TEARDOWN_INTR.9 \ BUS_SETUP_INTR.9 bus_teardown_intr.9 MLINKS+=bus_space.9 bus_space_alloc.9 \ bus_space.9 bus_space_barrier.9 \ bus_space.9 bus_space_copy_region_1.9 \ bus_space.9 bus_space_copy_region_2.9 \ bus_space.9 bus_space_copy_region_4.9 \ bus_space.9 bus_space_copy_region_8.9 \ bus_space.9 bus_space_copy_region_stream_1.9 \ bus_space.9 bus_space_copy_region_stream_2.9 \ bus_space.9 bus_space_copy_region_stream_4.9 \ bus_space.9 bus_space_copy_region_stream_8.9 \ bus_space.9 bus_space_free.9 \ bus_space.9 bus_space_map.9 \ bus_space.9 bus_space_read_1.9 \ bus_space.9 bus_space_read_2.9 \ bus_space.9 bus_space_read_4.9 \ bus_space.9 bus_space_read_8.9 \ bus_space.9 bus_space_read_multi_1.9 \ bus_space.9 bus_space_read_multi_2.9 \ bus_space.9 bus_space_read_multi_4.9 \ bus_space.9 bus_space_read_multi_8.9 \ bus_space.9 bus_space_read_multi_stream_1.9 \ bus_space.9 bus_space_read_multi_stream_2.9 \ bus_space.9 bus_space_read_multi_stream_4.9 \ bus_space.9 bus_space_read_multi_stream_8.9 \ bus_space.9 bus_space_read_region_1.9 \ bus_space.9 bus_space_read_region_2.9 \ bus_space.9 bus_space_read_region_4.9 \ bus_space.9 bus_space_read_region_8.9 \ bus_space.9 bus_space_read_region_stream_1.9 \ bus_space.9 bus_space_read_region_stream_2.9 \ bus_space.9 bus_space_read_region_stream_4.9 \ bus_space.9 bus_space_read_region_stream_8.9 \ bus_space.9 bus_space_read_stream_1.9 \ bus_space.9 bus_space_read_stream_2.9 \ bus_space.9 bus_space_read_stream_4.9 \ bus_space.9 bus_space_read_stream_8.9 \ bus_space.9 bus_space_set_multi_1.9 \ bus_space.9 bus_space_set_multi_2.9 \ bus_space.9 bus_space_set_multi_4.9 \ bus_space.9 bus_space_set_multi_8.9 \ bus_space.9 bus_space_set_multi_stream_1.9 \ bus_space.9 bus_space_set_multi_stream_2.9 \ bus_space.9 bus_space_set_multi_stream_4.9 \ bus_space.9 bus_space_set_multi_stream_8.9 \ bus_space.9 bus_space_set_region_1.9 \ bus_space.9 bus_space_set_region_2.9 \ bus_space.9 bus_space_set_region_4.9 \ bus_space.9 bus_space_set_region_8.9 \ bus_space.9 bus_space_set_region_stream_1.9 \ bus_space.9 bus_space_set_region_stream_2.9 \ bus_space.9 bus_space_set_region_stream_4.9 \ bus_space.9 bus_space_set_region_stream_8.9 \ bus_space.9 bus_space_subregion.9 \ bus_space.9 bus_space_unmap.9 \ bus_space.9 bus_space_write_1.9 \ bus_space.9 bus_space_write_2.9 \ bus_space.9 bus_space_write_4.9 \ bus_space.9 bus_space_write_8.9 \ bus_space.9 bus_space_write_multi_1.9 \ bus_space.9 bus_space_write_multi_2.9 \ bus_space.9 bus_space_write_multi_4.9 \ bus_space.9 bus_space_write_multi_8.9 \ bus_space.9 bus_space_write_multi_stream_1.9 \ bus_space.9 bus_space_write_multi_stream_2.9 \ bus_space.9 bus_space_write_multi_stream_4.9 \ bus_space.9 bus_space_write_multi_stream_8.9 \ bus_space.9 bus_space_write_region_1.9 \ bus_space.9 bus_space_write_region_2.9 \ bus_space.9 bus_space_write_region_4.9 \ bus_space.9 bus_space_write_region_8.9 \ bus_space.9 bus_space_write_region_stream_1.9 \ bus_space.9 bus_space_write_region_stream_2.9 \ bus_space.9 bus_space_write_region_stream_4.9 \ bus_space.9 bus_space_write_region_stream_8.9 \ bus_space.9 bus_space_write_stream_1.9 \ bus_space.9 bus_space_write_stream_2.9 \ bus_space.9 bus_space_write_stream_4.9 \ bus_space.9 bus_space_write_stream_8.9 MLINKS+=byteorder.9 be16dec.9 \ byteorder.9 be16enc.9 \ byteorder.9 be16toh.9 \ byteorder.9 be32dec.9 \ byteorder.9 be32enc.9 \ byteorder.9 
be32toh.9 \ byteorder.9 be64dec.9 \ byteorder.9 be64enc.9 \ byteorder.9 be64toh.9 \ byteorder.9 bswap16.9 \ byteorder.9 bswap32.9 \ byteorder.9 bswap64.9 \ byteorder.9 htobe16.9 \ byteorder.9 htobe32.9 \ byteorder.9 htobe64.9 \ byteorder.9 htole16.9 \ byteorder.9 htole32.9 \ byteorder.9 htole64.9 \ byteorder.9 le16dec.9 \ byteorder.9 le16enc.9 \ byteorder.9 le16toh.9 \ byteorder.9 le32dec.9 \ byteorder.9 le32enc.9 \ byteorder.9 le32toh.9 \ byteorder.9 le64dec.9 \ byteorder.9 le64enc.9 \ byteorder.9 le64toh.9 MLINKS+=callout.9 callout_active.9 \ callout.9 callout_async_drain.9 \ callout.9 callout_deactivate.9 \ callout.9 callout_drain.9 \ callout.9 callout_init.9 \ callout.9 callout_init_mtx.9 \ callout.9 callout_init_rm.9 \ callout.9 callout_init_rw.9 \ callout.9 callout_pending.9 \ callout.9 callout_reset.9 \ callout.9 callout_reset_curcpu.9 \ callout.9 callout_reset_on.9 \ callout.9 callout_reset_sbt.9 \ callout.9 callout_reset_sbt_curcpu.9 \ callout.9 callout_reset_sbt_on.9 \ callout.9 callout_schedule.9 \ callout.9 callout_schedule_curcpu.9 \ callout.9 callout_schedule_on.9 \ callout.9 callout_schedule_sbt.9 \ callout.9 callout_schedule_sbt_curcpu.9 \ callout.9 callout_schedule_sbt_on.9 \ callout.9 callout_stop.9 \ callout.9 callout_when.9 MLINKS+=cnv.9 cnvlist.9 \ cnv.9 cnvlist_free_binary.9 \ cnv.9 cnvlist_free_bool.9 \ cnv.9 cnvlist_free_bool_array.9 \ cnv.9 cnvlist_free_descriptor.9 \ cnv.9 cnvlist_free_descriptor_array.9 \ cnv.9 cnvlist_free_null.9 \ cnv.9 cnvlist_free_number.9 \ cnv.9 cnvlist_free_number_array.9 \ cnv.9 cnvlist_free_nvlist.9 \ cnv.9 cnvlist_free_nvlist_array.9 \ cnv.9 cnvlist_free_string.9 \ cnv.9 cnvlist_free_string_array.9 \ cnv.9 cnvlist_get_binary.9 \ cnv.9 cnvlist_get_bool.9 \ cnv.9 cnvlist_get_bool_array.9 \ cnv.9 cnvlist_get_descriptor.9 \ cnv.9 cnvlist_get_descriptor_array.9 \ cnv.9 cnvlist_get_number.9 \ cnv.9 cnvlist_get_number_array.9 \ cnv.9 cnvlist_get_nvlist.9 \ cnv.9 cnvlist_get_nvlist_array.9 \ cnv.9 cnvlist_get_string.9 \ cnv.9 cnvlist_get_string_array.9 \ cnv.9 cnvlist_take_binary.9 \ cnv.9 cnvlist_take_bool.9 \ cnv.9 cnvlist_take_bool_array.9 \ cnv.9 cnvlist_take_descriptor.9 \ cnv.9 cnvlist_take_descriptor_array.9 \ cnv.9 cnvlist_take_number.9 \ cnv.9 cnvlist_take_number_array.9 \ cnv.9 cnvlist_take_nvlist.9 \ cnv.9 cnvlist_take_nvlist_array.9 \ cnv.9 cnvlist_take_string.9 \ cnv.9 cnvlist_take_string_array.9 MLINKS+=condvar.9 cv_broadcast.9 \ condvar.9 cv_broadcastpri.9 \ condvar.9 cv_destroy.9 \ condvar.9 cv_init.9 \ condvar.9 cv_signal.9 \ condvar.9 cv_timedwait.9 \ condvar.9 cv_timedwait_sig.9 \ condvar.9 cv_timedwait_sig_sbt.9 \ condvar.9 cv_wait.9 \ condvar.9 cv_wait_sig.9 \ condvar.9 cv_wait_unlock.9 \ condvar.9 cv_wmesg.9 MLINKS+=config_intrhook.9 config_intrhook_disestablish.9 \ config_intrhook.9 config_intrhook_drain.9 \ config_intrhook.9 config_intrhook_establish.9 \ config_intrhook.9 config_intrhook_oneshot.9 MLINKS+=contigmalloc.9 contigmalloc_domainset.9 \ contigmalloc.9 contigfree.9 MLINKS+=casuword.9 casueword.9 \ casuword.9 casueword32.9 \ casuword.9 casuword32.9 MLINKS+=copy.9 copyin.9 \ copy.9 copyin_nofault.9 \ copy.9 copyinstr.9 \ copy.9 copyout.9 \ copy.9 copyout_nofault.9 \ copy.9 copystr.9 MLINKS+=counter.9 counter_u64_alloc.9 \ counter.9 counter_u64_free.9 \ counter.9 counter_u64_add.9 \ counter.9 counter_enter.9 \ counter.9 counter_exit.9 \ counter.9 counter_u64_add_protected.9 \ counter.9 counter_u64_fetch.9 \ counter.9 counter_u64_zero.9 \ counter.9 SYSCTL_COUNTER_U64.9 \ counter.9 SYSCTL_ADD_COUNTER_U64.9 \ 
counter.9 SYSCTL_COUNTER_U64_ARRAY.9 \ counter.9 SYSCTL_ADD_COUNTER_U64_ARRAY.9 MLINKS+=cpuset.9 CPUSET_T_INITIALIZER.9 \ cpuset.9 CPUSET_FSET.9 \ cpuset.9 CPU_CLR.9 \ cpuset.9 CPU_COPY.9 \ cpuset.9 CPU_ISSET.9 \ cpuset.9 CPU_SET.9 \ cpuset.9 CPU_ZERO.9 \ cpuset.9 CPU_FILL.9 \ cpuset.9 CPU_SETOF.9 \ cpuset.9 CPU_EMPTY.9 \ cpuset.9 CPU_ISFULLSET.9 \ cpuset.9 CPU_FFS.9 \ cpuset.9 CPU_COUNT.9 \ cpuset.9 CPU_SUBSET.9 \ cpuset.9 CPU_OVERLAP.9 \ cpuset.9 CPU_CMP.9 \ cpuset.9 CPU_OR.9 \ cpuset.9 CPU_AND.9 \ cpuset.9 CPU_ANDNOT.9 \ cpuset.9 CPU_CLR_ATOMIC.9 \ cpuset.9 CPU_SET_ATOMIC.9 \ cpuset.9 CPU_SET_ATOMIC_ACQ.9 \ cpuset.9 CPU_AND_ATOMIC.9 \ cpuset.9 CPU_OR_ATOMIC.9 \ cpuset.9 CPU_COPY_STORE_REL.9 MLINKS+=critical_enter.9 critical.9 \ critical_enter.9 critical_exit.9 MLINKS+=crypto_buffer.9 crypto_apply.9 \ crypto_buffer.9 crypto_apply_buf.9 \ crypto_buffer.9 crypto_buffer_contiguous_segment.9 \ crypto_buffer.9 crypto_buffer_len.9 \ crypto_buffer.9 crypto_contiguous_segment.9 \ crypto_buffer.9 crypto_cursor_init.9 \ crypto_buffer.9 crypto_cursor_advance.9 \ crypto_buffer.9 crypto_cursor_copyback.9 \ crypto_buffer.9 crypto_cursor_copydata.9 \ crypto_buffer.9 crypto_cursor_copydata_noadv.9 \ crypto_buffer.9 crypto_cursor_segment.9 \ crypto_buffer.9 CRYPTO_HAS_OUTPUT_BUFFER.9 MLINKS+=crypto_driver.9 crypto_copyback.9 \ crypto_driver.9 crypto_copydata.9 \ crypto_driver.9 crypto_done.9 \ crypto_driver.9 crypto_get_driverid.9 \ crypto_driver.9 crypto_get_driver_session.9 \ crypto_driver.9 crypto_read_iv.9 \ crypto_driver.9 crypto_unblock.9 \ crypto_driver.9 crypto_unregister_all.9 \ crypto_driver.9 CRYPTODEV_FREESESSION.9 \ crypto_driver.9 CRYPTODEV_NEWSESSION.9 \ crypto_driver.9 CRYPTODEV_PROBESESSION.9 \ crypto_driver.9 CRYPTODEV_PROCESS.9 \ crypto_driver.9 hmac_init_ipad.9 \ crypto_driver.9 hmac_init_opad.9 MLINKS+=crypto_request.9 crypto_destroyreq.9 \ crypto_request.9 crypto_dispatch.9 \ crypto_request.9 crypto_freereq.9 \ crypto_request.9 crypto_getreq.9 \ crypto_request.9 crypto_initreq.9 \ crypto_request.9 crypto_use_buf.9 \ crypto_request.9 crypto_use_mbuf.9 \ crypto_request.9 crypto_use_output_buf.9 \ crypto_request.9 crypto_use_output_mbuf.9 \ crypto_request.9 crypto_use_output_uio.9 \ crypto_request.9 crypto_use_uio.9 MLINKS+=crypto_session.9 crypto_auth_hash.9 \ crypto_session.9 crypto_cipher.9 \ crypto_session.9 crypto_get_params.9 \ crypto_session.9 crypto_newsession.9 \ crypto_session.9 crypto_freesession.9 MLINKS+=DB_COMMAND.9 DB_SHOW_ALL_COMMAND.9 \ DB_COMMAND.9 DB_SHOW_COMMAND.9 MLINKS+=DECLARE_MODULE.9 DECLARE_MODULE_TIED.9 MLINKS+=dev_clone.9 drain_dev_clone_events.9 MLINKS+=dev_refthread.9 devvn_refthread.9 \ dev_refthread.9 dev_relthread.9 MLINKS+=devfs_set_cdevpriv.9 devfs_clear_cdevpriv.9 \ devfs_set_cdevpriv.9 devfs_get_cdevpriv.9 MLINKS+=device_add_child.9 device_add_child_ordered.9 MLINKS+=device_enable.9 device_disable.9 \ device_enable.9 device_is_enabled.9 MLINKS+=device_get_ivars.9 device_set_ivars.9 MLINKS+=device_get_name.9 device_get_nameunit.9 MLINKS+=device_get_state.9 device_busy.9 \ device_get_state.9 device_is_alive.9 \ device_get_state.9 device_is_attached.9 \ device_get_state.9 device_unbusy.9 MLINKS+=device_get_sysctl.9 device_get_sysctl_ctx.9 \ device_get_sysctl.9 device_get_sysctl_tree.9 MLINKS+=device_quiet.9 device_is_quiet.9 \ device_quiet.9 device_verbose.9 MLINKS+=device_set_desc.9 device_get_desc.9 \ device_set_desc.9 device_set_desc_copy.9 MLINKS+=device_set_flags.9 device_get_flags.9 MLINKS+=devstat.9 devicestat.9 \ devstat.9 devstat_new_entry.9 \ 
devstat.9 devstat_end_transaction.9 \ devstat.9 devstat_end_transaction_bio.9 \ devstat.9 devstat_remove_entry.9 \ devstat.9 devstat_start_transaction.9 \ devstat.9 devstat_start_transaction_bio.9 MLINKS+=disk.9 disk_add_alias.9 \ disk.9 disk_alloc.9 \ disk.9 disk_create.9 \ disk.9 disk_destroy.9 \ disk.9 disk_gone.9 \ disk.9 disk_resize.9 MLINKS+=dnv.9 dnvlist.9 \ dnv.9 dnvlist_get_binary.9 \ dnv.9 dnvlist_get_bool.9 \ dnv.9 dnvlist_get_descriptor.9 \ dnv.9 dnvlist_get_number.9 \ dnv.9 dnvlist_get_nvlist.9 \ dnv.9 dnvlist_get_string.9 \ dnv.9 dnvlist_take_binary.9 \ dnv.9 dnvlist_take_bool.9 \ dnv.9 dnvlist_take_descriptor.9 \ dnv.9 dnvlist_take_number.9 \ dnv.9 dnvlist_take_nvlist.9 \ dnv.9 dnvlist_take_string.9 MLINKS+=domain.9 DOMAIN_SET.9 \ domain.9 domain_add.9 \ domain.9 pfctlinput.9 \ domain.9 pffinddomain.9 \ domain.9 pffindproto.9 \ domain.9 pffindtype.9 MLINKS+=drbr.9 drbr_free.9 \ drbr.9 drbr_enqueue.9 \ drbr.9 drbr_dequeue.9 \ drbr.9 drbr_dequeue_cond.9 \ drbr.9 drbr_flush.9 \ drbr.9 drbr_empty.9 \ drbr.9 drbr_inuse.9 \ drbr.9 drbr_stats_update.9 MLINKS+=DRIVER_MODULE.9 DRIVER_MODULE_ORDERED.9 \ DRIVER_MODULE.9 EARLY_DRIVER_MODULE.9 \ DRIVER_MODULE.9 EARLY_DRIVER_MODULE_ORDERED.9 MLINKS+=epoch.9 epoch_context.9 \ epoch.9 epoch_alloc.9 \ epoch.9 epoch_free.9 \ epoch.9 epoch_enter.9 \ epoch.9 epoch_exit.9 \ epoch.9 epoch_wait.9 \ epoch.9 epoch_call.9 \ epoch.9 epoch_drain_callbacks.9 \ epoch.9 in_epoch.9 MLINKS+=EVENTHANDLER.9 EVENTHANDLER_DECLARE.9 \ EVENTHANDLER.9 EVENTHANDLER_DEFINE.9 \ EVENTHANDLER.9 EVENTHANDLER_DEREGISTER.9 \ EVENTHANDLER.9 eventhandler_deregister.9 \ EVENTHANDLER.9 eventhandler_find_list.9 \ EVENTHANDLER.9 EVENTHANDLER_INVOKE.9 \ EVENTHANDLER.9 eventhandler_prune_list.9 \ EVENTHANDLER.9 EVENTHANDLER_REGISTER.9 \ EVENTHANDLER.9 eventhandler_register.9 MLINKS+=eventtimers.9 et_register.9 \ eventtimers.9 et_deregister.9 \ eventtimers.9 et_ban.9 \ eventtimers.9 et_find.9 \ eventtimers.9 et_free.9 \ eventtimers.9 et_init.9 \ eventtimers.9 ET_LOCK.9 \ eventtimers.9 ET_UNLOCK.9 \ eventtimers.9 et_start.9 \ eventtimers.9 et_stop.9 MLINKS+=fail.9 KFAIL_POINT_CODE.9 \ fail.9 KFAIL_POINT_ERROR.9 \ fail.9 KFAIL_POINT_GOTO.9 \ fail.9 KFAIL_POINT_RETURN.9 \ fail.9 KFAIL_POINT_RETURN_VOID.9 MLINKS+=fdt_pinctrl.9 fdt_pinctrl_configure.9 \ fdt_pinctrl.9 fdt_pinctrl_configure_by_name.9 \ fdt_pinctrl.9 fdt_pinctrl_configure_tree.9 \ fdt_pinctrl.9 fdt_pinctrl_register.9 MLINKS+=fetch.9 fubyte.9 \ fetch.9 fuword.9 \ fetch.9 fuword16.9 \ fetch.9 fuword32.9 \ fetch.9 fuword64.9 \ fetch.9 fueword.9 \ fetch.9 fueword32.9 \ fetch.9 fueword64.9 MLINKS+=firmware.9 firmware_get.9 \ firmware.9 firmware_put.9 \ firmware.9 firmware_register.9 \ firmware.9 firmware_unregister.9 MLINKS+=fpu_kern.9 fpu_kern_alloc_ctx.9 \ fpu_kern.9 fpu_kern_free_ctx.9 \ fpu_kern.9 fpu_kern_enter.9 \ fpu_kern.9 fpu_kern_leave.9 \ fpu_kern.9 fpu_kern_thread.9 \ fpu_kern.9 is_fpu_kern_thread.9 MLINKS+=g_attach.9 g_detach.9 MLINKS+=g_bio.9 g_alloc_bio.9 \ g_bio.9 g_clone_bio.9 \ g_bio.9 g_destroy_bio.9 \ g_bio.9 g_duplicate_bio.9 \ g_bio.9 g_format_bio.9 \ g_bio.9 g_new_bio.9 \ g_bio.9 g_print_bio.9 \ g_bio.9 g_reset_bio.9 MLINKS+=g_consumer.9 g_destroy_consumer.9 \ g_consumer.9 g_new_consumer.9 MLINKS+=g_data.9 g_read_data.9 \ g_data.9 g_write_data.9 MLINKS+=getenv.9 freeenv.9 \ getenv.9 getenv_int.9 \ getenv.9 getenv_long.9 \ getenv.9 getenv_string.9 \ getenv.9 getenv_quad.9 \ getenv.9 getenv_uint.9 \ getenv.9 getenv_ulong.9 \ getenv.9 getenv_bool.9 \ getenv.9 getenv_is_true.9 \ getenv.9 getenv_is_false.9 \ 
getenv.9 kern_getenv.9 \ getenv.9 kern_setenv.9 \ getenv.9 kern_unsetenv.9 \ getenv.9 setenv.9 \ getenv.9 testenv.9 \ getenv.9 unsetenv.9 MLINKS+=g_event.9 g_cancel_event.9 \ g_event.9 g_post_event.9 \ g_event.9 g_waitfor_event.9 MLINKS+=g_geom.9 g_destroy_geom.9 \ g_geom.9 g_new_geomf.9 MLINKS+=g_provider.9 g_destroy_provider.9 \ g_provider.9 g_error_provider.9 \ g_provider.9 g_new_providerf.9 MLINKS+=hash.9 hash32.9 \ hash.9 hash32_buf.9 \ hash.9 hash32_str.9 \ hash.9 hash32_stre.9 \ hash.9 hash32_strn.9 \ hash.9 hash32_strne.9 \ hash.9 jenkins_hash.9 \ hash.9 jenkins_hash32.9 MLINKS+=hashinit.9 hashdestroy.9 \ hashinit.9 hashinit_flags.9 \ hashinit.9 phashinit.9 MLINKS+=hhook.9 hhook_head_register.9 \ hhook.9 hhook_head_deregister.9 \ hhook.9 hhook_head_deregister_lookup.9 \ hhook.9 hhook_run_hooks.9 \ hhook.9 HHOOKS_RUN_IF.9 \ hhook.9 HHOOKS_RUN_LOOKUP_IF.9 MLINKS+=hz.9 tick.9 MLINKS+=ieee80211.9 ieee80211_ifattach.9 \ ieee80211.9 ieee80211_ifdetach.9 MLINKS+=ieee80211_amrr.9 ieee80211_amrr_choose.9 \ ieee80211_amrr.9 ieee80211_amrr_cleanup.9 \ ieee80211_amrr.9 ieee80211_amrr_init.9 \ ieee80211_amrr.9 ieee80211_amrr_node_init.9 \ ieee80211_amrr.9 ieee80211_amrr_setinterval.9 \ ieee80211_amrr.9 ieee80211_amrr_tx_complete.9 \ ieee80211_amrr.9 ieee80211_amrr_tx_update.9 MLINKS+=ieee80211_beacon.9 ieee80211_beacon_alloc.9 \ ieee80211_beacon.9 ieee80211_beacon_notify.9 \ ieee80211_beacon.9 ieee80211_beacon_update.9 MLINKS+=ieee80211_bmiss.9 ieee80211_beacon_miss.9 MLINKS+=ieee80211_crypto.9 ieee80211_crypto_available.9 \ ieee80211_crypto.9 ieee80211_crypto_decap.9 \ ieee80211_crypto.9 ieee80211_crypto_delglobalkeys.9 \ ieee80211_crypto.9 ieee80211_crypto_delkey.9 \ ieee80211_crypto.9 ieee80211_crypto_demic.9 \ ieee80211_crypto.9 ieee80211_crypto_encap.9 \ ieee80211_crypto.9 ieee80211_crypto_enmic.9 \ ieee80211_crypto.9 ieee80211_crypto_newkey.9 \ ieee80211_crypto.9 ieee80211_crypto_register.9 \ ieee80211_crypto.9 ieee80211_crypto_reload_keys.9 \ ieee80211_crypto.9 ieee80211_crypto_setkey.9 \ ieee80211_crypto.9 ieee80211_crypto_unregister.9 \ ieee80211_crypto.9 ieee80211_key_update_begin.9 \ ieee80211_crypto.9 ieee80211_key_update_end.9 \ ieee80211_crypto.9 ieee80211_notify_michael_failure.9 \ ieee80211_crypto.9 ieee80211_notify_replay_failure.9 MLINKS+=ieee80211_input.9 ieee80211_input_all.9 MLINKS+=ieee80211_node.9 ieee80211_dump_node.9 \ ieee80211_node.9 ieee80211_dump_nodes.9 \ ieee80211_node.9 ieee80211_find_rxnode.9 \ ieee80211_node.9 ieee80211_find_rxnode_withkey.9 \ ieee80211_node.9 ieee80211_free_node.9 \ ieee80211_node.9 ieee80211_iterate_nodes.9 \ ieee80211_node.9 ieee80211_ref_node.9 \ ieee80211_node.9 ieee80211_unref_node.9 MLINKS+=ieee80211_output.9 ieee80211_process_callback.9 \ ieee80211_output.9 M_SEQNO_GET.9 \ ieee80211_output.9 M_WME_GETAC.9 MLINKS+=ieee80211_proto.9 ieee80211_new_state.9 \ ieee80211_proto.9 ieee80211_resume_all.9 \ ieee80211_proto.9 ieee80211_start_all.9 \ ieee80211_proto.9 ieee80211_stop_all.9 \ ieee80211_proto.9 ieee80211_suspend_all.9 \ ieee80211_proto.9 ieee80211_waitfor_parent.9 MLINKS+=ieee80211_radiotap.9 ieee80211_radiotap_active.9 \ ieee80211_radiotap.9 ieee80211_radiotap_active_vap.9 \ ieee80211_radiotap.9 ieee80211_radiotap_attach.9 \ ieee80211_radiotap.9 ieee80211_radiotap_tx.9 \ ieee80211_radiotap.9 radiotap.9 MLINKS+=ieee80211_regdomain.9 ieee80211_alloc_countryie.9 \ ieee80211_regdomain.9 ieee80211_init_channels.9 \ ieee80211_regdomain.9 ieee80211_sort_channels.9 MLINKS+=ieee80211_scan.9 ieee80211_add_scan.9 \ ieee80211_scan.9 
ieee80211_bg_scan.9 \ ieee80211_scan.9 ieee80211_cancel_scan.9 \ ieee80211_scan.9 ieee80211_cancel_scan_any.9 \ ieee80211_scan.9 ieee80211_check_scan.9 \ ieee80211_scan.9 ieee80211_check_scan_current.9 \ ieee80211_scan.9 ieee80211_flush.9 \ ieee80211_scan.9 ieee80211_probe_curchan.9 \ ieee80211_scan.9 ieee80211_scan_assoc_fail.9 \ ieee80211_scan.9 ieee80211_scan_done.9 \ ieee80211_scan.9 ieee80211_scan_dump_channels.9 \ ieee80211_scan.9 ieee80211_scan_flush.9 \ ieee80211_scan.9 ieee80211_scan_iterate.9 \ ieee80211_scan.9 ieee80211_scan_next.9 \ ieee80211_scan.9 ieee80211_scan_timeout.9 \ ieee80211_scan.9 ieee80211_scanner_get.9 \ ieee80211_scan.9 ieee80211_scanner_register.9 \ ieee80211_scan.9 ieee80211_scanner_unregister.9 \ ieee80211_scan.9 ieee80211_scanner_unregister_all.9 \ ieee80211_scan.9 ieee80211_start_scan.9 MLINKS+=ieee80211_vap.9 ieee80211_vap_attach.9 \ ieee80211_vap.9 ieee80211_vap_detach.9 \ ieee80211_vap.9 ieee80211_vap_setup.9 MLINKS+=iflibdd.9 ifdi_attach_pre.9 \ iflibdd.9 ifdi_attach_post.9 \ iflibdd.9 ifdi_detach.9 \ iflibdd.9 ifdi_get_counter.9 \ iflibdd.9 ifdi_i2c_req.9 \ iflibdd.9 ifdi_init.9 \ iflibdd.9 ifdi_intr_enable.9 \ iflibdd.9 ifdi_intr_disable.9 \ iflibdd.9 ifdi_led_func.9 \ iflibdd.9 ifdi_link_intr_enable.9 \ iflibdd.9 ifdi_media_set.9 \ iflibdd.9 ifdi_media_status.9 \ iflibdd.9 ifdi_media_change.9 \ iflibdd.9 ifdi_mtu_set.9 \ iflibdd.9 ifdi_multi_set.9 \ iflibdd.9 ifdi_promisc_set.9 \ iflibdd.9 ifdi_queues_alloc.9 \ iflibdd.9 ifdi_queues_free.9 \ iflibdd.9 ifdi_queue_intr_enable.9 \ iflibdd.9 ifdi_resume.9 \ iflibdd.9 ifdi_rxq_setup.9 \ iflibdd.9 ifdi_stop.9 \ iflibdd.9 ifdi_suspend.9 \ iflibdd.9 ifdi_sysctl_int_delay.9 \ iflibdd.9 ifdi_timer.9 \ iflibdd.9 ifdi_txq_setup.9 \ iflibdd.9 ifdi_update_admin_status.9 \ iflibdd.9 ifdi_vf_add.9 \ iflibdd.9 ifdi_vflr_handle.9 \ iflibdd.9 ifdi_vlan_register.9 \ iflibdd.9 ifdi_vlan_unregister.9 \ iflibdd.9 ifdi_watchdog_reset.9 \ iflibdd.9 iov_init.9 \ iflibdd.9 iov_uinit.9 MLINKS+=iflibdi.9 iflib_add_int_delay_sysctl.9 \ iflibdi.9 iflib_device_attach.9 \ iflibdi.9 iflib_device_deregister.9 \ iflibdi.9 iflib_device_detach.9 \ iflibdi.9 iflib_device_suspend.9 \ iflibdi.9 iflib_device_register.9 \ iflibdi.9 iflib_device_resume.9 \ iflibdi.9 iflib_led_create.9 \ iflibdi.9 iflib_irq_alloc.9 \ iflibdi.9 iflib_irq_alloc_generic.9 \ iflibdi.9 iflib_link_intr_deferred.9 \ iflibdi.9 iflib_link_state_change.9 \ iflibdi.9 iflib_rx_intr_deferred.9 \ iflibdi.9 iflib_tx_intr_deferred.9 MLINKS+=iflibtxrx.9 isc_rxd_available.9 \ iflibtxrx.9 isc_rxd_refill.9 \ iflibtxrx.9 isc_rxd_flush.9 \ iflibtxrx.9 isc_rxd_pkt_get.9 \ iflibtxrx.9 isc_txd_credits_update.9 \ iflibtxrx.9 isc_txd_encap.9 \ iflibtxrx.9 isc_txd_flush.9 MLINKS+=ifnet.9 if_addmulti.9 \ ifnet.9 if_alloc.9 \ ifnet.9 if_alloc_dev.9 \ ifnet.9 if_alloc_domain.9 \ ifnet.9 if_allmulti.9 \ ifnet.9 if_attach.9 \ ifnet.9 if_data.9 \ ifnet.9 IF_DEQUEUE.9 \ ifnet.9 if_delmulti.9 \ ifnet.9 if_detach.9 \ ifnet.9 if_down.9 \ ifnet.9 if_findmulti.9 \ ifnet.9 if_free.9 \ ifnet.9 if_free_type.9 \ ifnet.9 if_up.9 \ ifnet.9 ifa_free.9 \ ifnet.9 ifa_ifwithaddr.9 \ ifnet.9 ifa_ifwithdstaddr.9 \ ifnet.9 ifa_ifwithnet.9 \ ifnet.9 ifa_ref.9 \ ifnet.9 ifaddr.9 \ ifnet.9 ifaddr_byindex.9 \ ifnet.9 ifaof_ifpforaddr.9 \ ifnet.9 ifioctl.9 \ ifnet.9 ifpromisc.9 \ ifnet.9 ifqueue.9 \ ifnet.9 ifunit.9 \ ifnet.9 ifunit_ref.9 MLINKS+=insmntque.9 insmntque1.9 MLINKS+=ithread.9 ithread_add_handler.9 \ ithread.9 ithread_create.9 \ ithread.9 ithread_destroy.9 \ ithread.9 ithread_priority.9 \ ithread.9 
ithread_remove_handler.9 \ ithread.9 ithread_schedule.9 MLINKS+=kernacc.9 useracc.9 MLINKS+=kernel_mount.9 free_mntarg.9 \ kernel_mount.9 kernel_vmount.9 \ kernel_mount.9 mount_arg.9 \ kernel_mount.9 mount_argb.9 \ kernel_mount.9 mount_argf.9 \ kernel_mount.9 mount_argsu.9 MLINKS+=kasan.9 KASAN.9 \ kasan.9 kasan_mark.9 MLINKS+=khelp.9 khelp_add_hhook.9 \ khelp.9 KHELP_DECLARE_MOD.9 \ khelp.9 KHELP_DECLARE_MOD_UMA.9 \ khelp.9 khelp_destroy_osd.9 \ khelp.9 khelp_get_id.9 \ khelp.9 khelp_get_osd.9 \ khelp.9 khelp_init_osd.9 \ khelp.9 khelp_remove_hhook.9 MLINKS+=kobj.9 DEFINE_CLASS.9 \ kobj.9 kobj_class_compile.9 \ kobj.9 kobj_class_compile_static.9 \ kobj.9 kobj_class_free.9 \ kobj.9 kobj_create.9 \ kobj.9 kobj_delete.9 \ kobj.9 kobj_init.9 \ kobj.9 kobj_init_static.9 MLINKS+=kproc.9 kproc_create.9 \ kproc.9 kproc_exit.9 \ kproc.9 kproc_kthread_add.9 \ kproc.9 kproc_resume.9 \ kproc.9 kproc_shutdown.9 \ kproc.9 kproc_start.9 \ kproc.9 kproc_suspend.9 \ kproc.9 kproc_suspend_check.9 \ kproc.9 kthread_create.9 MLINKS+=kqueue.9 knlist_add.9 \ kqueue.9 knlist_clear.9 \ kqueue.9 knlist_delete.9 \ kqueue.9 knlist_destroy.9 \ kqueue.9 knlist_empty.9 \ kqueue.9 knlist_init.9 \ kqueue.9 knlist_init_mtx.9 \ kqueue.9 knlist_init_rw_reader.9 \ kqueue.9 knlist_remove.9 \ kqueue.9 knlist_remove_inevent.9 \ kqueue.9 knote_fdclose.9 \ kqueue.9 KNOTE_LOCKED.9 \ kqueue.9 KNOTE_UNLOCKED.9 \ kqueue.9 kqfd_register.9 \ kqueue.9 kqueue_add_filteropts.9 \ kqueue.9 kqueue_del_filteropts.9 MLINKS+=kthread.9 kthread_add.9 \ kthread.9 kthread_exit.9 \ kthread.9 kthread_resume.9 \ kthread.9 kthread_shutdown.9 \ kthread.9 kthread_start.9 \ kthread.9 kthread_suspend.9 \ kthread.9 kthread_suspend_check.9 MLINKS+=ktr.9 CTR0.9 \ ktr.9 CTR1.9 \ ktr.9 CTR2.9 \ ktr.9 CTR3.9 \ ktr.9 CTR4.9 \ ktr.9 CTR5.9 \ ktr.9 CTR6.9 MLINKS+=lock.9 lockdestroy.9 \ lock.9 lockinit.9 \ lock.9 lockmgr.9 \ lock.9 lockmgr_args.9 \ lock.9 lockmgr_args_rw.9 \ lock.9 lockmgr_assert.9 \ lock.9 lockmgr_disown.9 \ lock.9 lockmgr_printinfo.9 \ lock.9 lockmgr_recursed.9 \ lock.9 lockmgr_rw.9 \ lock.9 lockstatus.9 MLINKS+=LOCK_PROFILING.9 MUTEX_PROFILING.9 MLINKS+=make_dev.9 destroy_dev.9 \ make_dev.9 destroy_dev_drain.9 \ make_dev.9 destroy_dev_sched.9 \ make_dev.9 destroy_dev_sched_cb.9 \ make_dev.9 dev_depends.9 \ make_dev.9 make_dev_alias.9 \ make_dev.9 make_dev_alias_p.9 \ make_dev.9 make_dev_cred.9 \ make_dev.9 make_dev_credf.9 \ make_dev.9 make_dev_p.9 \ make_dev.9 make_dev_s.9 MLINKS+=malloc.9 free.9 \ malloc.9 malloc_domainset.9 \ malloc.9 mallocarray.9 \ malloc.9 mallocarray_domainset.9 \ malloc.9 MALLOC_DECLARE.9 \ malloc.9 MALLOC_DEFINE.9 \ malloc.9 realloc.9 \ malloc.9 reallocf.9 MLINKS+=mbchain.9 mb_detach.9 \ mbchain.9 mb_done.9 \ mbchain.9 mb_fixhdr.9 \ mbchain.9 mb_init.9 \ mbchain.9 mb_initm.9 \ mbchain.9 mb_put_int64be.9 \ mbchain.9 mb_put_int64le.9 \ mbchain.9 mb_put_mbuf.9 \ mbchain.9 mb_put_mem.9 \ mbchain.9 mb_put_uint16be.9 \ mbchain.9 mb_put_uint16le.9 \ mbchain.9 mb_put_uint32be.9 \ mbchain.9 mb_put_uint32le.9 \ mbchain.9 mb_put_uint8.9 \ mbchain.9 mb_put_uio.9 \ mbchain.9 mb_reserve.9 MLINKS+=\ mbuf.9 m_adj.9 \ mbuf.9 m_align.9 \ mbuf.9 M_ALIGN.9 \ mbuf.9 m_append.9 \ mbuf.9 m_apply.9 \ mbuf.9 m_cat.9 \ mbuf.9 m_catpkt.9 \ mbuf.9 MCHTYPE.9 \ mbuf.9 MCLGET.9 \ mbuf.9 m_collapse.9 \ mbuf.9 m_copyback.9 \ mbuf.9 m_copydata.9 \ mbuf.9 m_copym.9 \ mbuf.9 m_copypacket.9 \ mbuf.9 m_copyup.9 \ mbuf.9 m_defrag.9 \ mbuf.9 m_devget.9 \ mbuf.9 m_dup.9 \ mbuf.9 m_dup_pkthdr.9 \ mbuf.9 MEXTADD.9 \ mbuf.9 m_fixhdr.9 \ mbuf.9 m_free.9 \ mbuf.9 
m_freem.9 \ mbuf.9 MGET.9 \ mbuf.9 m_get.9 \ mbuf.9 m_get2.9 \ mbuf.9 m_getjcl.9 \ mbuf.9 m_getcl.9 \ mbuf.9 MGETHDR.9 \ mbuf.9 m_gethdr.9 \ mbuf.9 m_getm.9 \ mbuf.9 m_getptr.9 \ mbuf.9 MH_ALIGN.9 \ mbuf.9 M_LEADINGSPACE.9 \ mbuf.9 m_length.9 \ mbuf.9 M_MOVE_PKTHDR.9 \ mbuf.9 m_move_pkthdr.9 \ mbuf.9 M_PREPEND.9 \ mbuf.9 m_prepend.9 \ mbuf.9 m_pulldown.9 \ mbuf.9 m_pullup.9 \ mbuf.9 m_split.9 \ mbuf.9 mtod.9 \ mbuf.9 M_TRAILINGSPACE.9 \ mbuf.9 m_unshare.9 \ mbuf.9 M_WRITABLE.9 MLINKS+=\ mbuf_tags.9 m_tag_alloc.9 \ mbuf_tags.9 m_tag_copy.9 \ mbuf_tags.9 m_tag_copy_chain.9 \ mbuf_tags.9 m_tag_delete.9 \ mbuf_tags.9 m_tag_delete_chain.9 \ mbuf_tags.9 m_tag_delete_nonpersistent.9 \ mbuf_tags.9 m_tag_find.9 \ mbuf_tags.9 m_tag_first.9 \ mbuf_tags.9 m_tag_free.9 \ mbuf_tags.9 m_tag_get.9 \ mbuf_tags.9 m_tag_init.9 \ mbuf_tags.9 m_tag_locate.9 \ mbuf_tags.9 m_tag_next.9 \ mbuf_tags.9 m_tag_prepend.9 \ mbuf_tags.9 m_tag_unlink.9 MLINKS+=MD5.9 MD5Init.9 \ MD5.9 MD5Transform.9 MLINKS+=mdchain.9 md_append_record.9 \ mdchain.9 md_done.9 \ mdchain.9 md_get_int64.9 \ mdchain.9 md_get_int64be.9 \ mdchain.9 md_get_int64le.9 \ mdchain.9 md_get_mbuf.9 \ mdchain.9 md_get_mem.9 \ mdchain.9 md_get_uint16.9 \ mdchain.9 md_get_uint16be.9 \ mdchain.9 md_get_uint16le.9 \ mdchain.9 md_get_uint32.9 \ mdchain.9 md_get_uint32be.9 \ mdchain.9 md_get_uint32le.9 \ mdchain.9 md_get_uint8.9 \ mdchain.9 md_get_uio.9 \ mdchain.9 md_initm.9 \ mdchain.9 md_next_record.9 MLINKS+=microtime.9 bintime.9 \ microtime.9 getbintime.9 \ microtime.9 getmicrotime.9 \ microtime.9 getnanotime.9 \ microtime.9 nanotime.9 MLINKS+=microuptime.9 binuptime.9 \ microuptime.9 getbinuptime.9 \ microuptime.9 getmicrouptime.9 \ microuptime.9 getnanouptime.9 \ microuptime.9 getsbinuptime.9 \ microuptime.9 nanouptime.9 \ microuptime.9 sbinuptime.9 MLINKS+=mi_switch.9 cpu_switch.9 \ mi_switch.9 cpu_throw.9 MLINKS+=mod_cc.9 CCV.9 \ mod_cc.9 DECLARE_CC_MODULE.9 MLINKS+=mtx_pool.9 mtx_pool_alloc.9 \ mtx_pool.9 mtx_pool_create.9 \ mtx_pool.9 mtx_pool_destroy.9 \ mtx_pool.9 mtx_pool_find.9 \ mtx_pool.9 mtx_pool_lock.9 \ mtx_pool.9 mtx_pool_lock_spin.9 \ mtx_pool.9 mtx_pool_unlock.9 \ mtx_pool.9 mtx_pool_unlock_spin.9 MLINKS+=mutex.9 mtx_assert.9 \ mutex.9 mtx_destroy.9 \ mutex.9 mtx_init.9 \ mutex.9 mtx_initialized.9 \ mutex.9 mtx_lock.9 \ mutex.9 mtx_lock_flags.9 \ mutex.9 mtx_lock_spin.9 \ mutex.9 mtx_lock_spin_flags.9 \ mutex.9 mtx_owned.9 \ mutex.9 mtx_recursed.9 \ mutex.9 mtx_sleep.9 \ mutex.9 MTX_SYSINIT.9 \ mutex.9 mtx_trylock.9 \ mutex.9 mtx_trylock_flags.9 \ mutex.9 mtx_trylock_spin.9 \ mutex.9 mtx_trylock_spin_flags.9 \ mutex.9 mtx_unlock.9 \ mutex.9 mtx_unlock_flags.9 \ mutex.9 mtx_unlock_spin.9 \ mutex.9 mtx_unlock_spin_flags.9 MLINKS+=namei.9 NDFREE.9 \ namei.9 NDINIT.9 MLINKS+=netisr.9 netisr_clearqdrops.9 \ netisr.9 netisr_default_flow2cpu.9 \ netisr.9 netisr_dispatch.9 \ netisr.9 netisr_dispatch_src.9 \ netisr.9 netisr_get_cpucount.9 \ netisr.9 netisr_get_cpuid.9 \ netisr.9 netisr_getqdrops.9 \ netisr.9 netisr_getqlimit.9 \ netisr.9 netisr_queue.9 \ netisr.9 netisr_queue_src.9 \ netisr.9 netisr_register.9 \ netisr.9 netisr_setqlimit.9 \ netisr.9 netisr_unregister.9 MLINKS+=nv.9 libnv.9 \ nv.9 nvlist.9 \ nv.9 nvlist_add_binary.9 \ nv.9 nvlist_add_bool.9 \ nv.9 nvlist_add_bool_array.9 \ nv.9 nvlist_add_descriptor.9 \ nv.9 nvlist_add_descriptor_array.9 \ nv.9 nvlist_add_null.9 \ nv.9 nvlist_add_number.9 \ nv.9 nvlist_add_number_array.9 \ nv.9 nvlist_add_nvlist.9 \ nv.9 nvlist_add_nvlist_array.9 \ nv.9 nvlist_add_string.9 \ nv.9 
nvlist_add_stringf.9 \ nv.9 nvlist_add_stringv.9 \ nv.9 nvlist_add_string_array.9 \ nv.9 nvlist_append_bool_array.9 \ nv.9 nvlist_append_descriptor_array.9 \ nv.9 nvlist_append_nvlist_array.9 \ nv.9 nvlist_append_number_array.9 \ nv.9 nvlist_append_string_array.9 \ nv.9 nvlist_clone.9 \ nv.9 nvlist_create.9 \ nv.9 nvlist_destroy.9 \ nv.9 nvlist_dump.9 \ nv.9 nvlist_empty.9 \ nv.9 nvlist_error.9 \ nv.9 nvlist_exists.9 \ nv.9 nvlist_exists_binary.9 \ nv.9 nvlist_exists_bool.9 \ nv.9 nvlist_exists_bool_array.9 \ nv.9 nvlist_exists_descriptor.9 \ nv.9 nvlist_exists_descriptor_array.9 \ nv.9 nvlist_exists_null.9 \ nv.9 nvlist_exists_number.9 \ nv.9 nvlist_exists_number_array.9 \ nv.9 nvlist_exists_nvlist.9 \ nv.9 nvlist_exists_nvlist_array.9 \ nv.9 nvlist_exists_string.9 \ nv.9 nvlist_exists_type.9 \ nv.9 nvlist_fdump.9 \ nv.9 nvlist_flags.9 \ nv.9 nvlist_free.9 \ nv.9 nvlist_free_binary.9 \ nv.9 nvlist_free_bool.9 \ nv.9 nvlist_free_bool_array.9 \ nv.9 nvlist_free_descriptor.9 \ nv.9 nvlist_free_descriptor_array.9 \ nv.9 nvlist_free_null.9 \ nv.9 nvlist_free_number.9 \ nv.9 nvlist_free_number_array.9 \ nv.9 nvlist_free_nvlist.9 \ nv.9 nvlist_free_nvlist_array.9 \ nv.9 nvlist_free_string.9 \ nv.9 nvlist_free_string_array.9 \ nv.9 nvlist_free_type.9 \ nv.9 nvlist_get_binary.9 \ nv.9 nvlist_get_bool.9 \ nv.9 nvlist_get_bool_array.9 \ nv.9 nvlist_get_descriptor.9 \ nv.9 nvlist_get_descriptor_array.9 \ nv.9 nvlist_get_number.9 \ nv.9 nvlist_get_number_array.9 \ nv.9 nvlist_get_nvlist.9 \ nv.9 nvlist_get_nvlist_array.9 \ nv.9 nvlist_get_parent.9 \ nv.9 nvlist_get_string.9 \ nv.9 nvlist_get_string_array.9 \ nv.9 nvlist_move_binary.9 \ nv.9 nvlist_move_descriptor.9 \ nv.9 nvlist_move_descriptor_array.9 \ nv.9 nvlist_move_nvlist.9 \ nv.9 nvlist_move_nvlist_array.9 \ nv.9 nvlist_move_string.9 \ nv.9 nvlist_move_string_array.9 \ nv.9 nvlist_next.9 \ nv.9 nvlist_pack.9 \ nv.9 nvlist_recv.9 \ nv.9 nvlist_send.9 \ nv.9 nvlist_set_error.9 \ nv.9 nvlist_size.9 \ nv.9 nvlist_take_binary.9 \ nv.9 nvlist_take_bool.9 \ nv.9 nvlist_take_bool_array.9 \ nv.9 nvlist_take_descriptor.9 \ nv.9 nvlist_take_descriptor_array.9 \ nv.9 nvlist_take_number.9 \ nv.9 nvlist_take_number_array.9 \ nv.9 nvlist_take_nvlist.9 \ nv.9 nvlist_take_nvlist_array.9 \ nv.9 nvlist_take_string.9 \ nv.9 nvlist_take_string_array.9 \ nv.9 nvlist_unpack.9 \ nv.9 nvlist_xfer.9 MLINKS+=OF_child.9 OF_parent.9 \ OF_child.9 OF_peer.9 MLINKS+=OF_device_from_xref.9 OF_device_register_xref.9 \ OF_device_from_xref.9 OF_xref_from_device.9 MLINKS+=OF_getprop.9 OF_getencprop.9 \ OF_getprop.9 OF_getencprop_alloc.9 \ OF_getprop.9 OF_getencprop_alloc_multi.9 \ OF_getprop.9 OF_getprop_alloc.9 \ OF_getprop.9 OF_getprop_alloc_multi.9 \ OF_getprop.9 OF_getproplen.9 \ OF_getprop.9 OF_hasprop.9 \ OF_getprop.9 OF_nextprop.9 \ OF_getprop.9 OF_prop_free.9 \ OF_getprop.9 OF_searchencprop.9 \ OF_getprop.9 OF_searchprop.9 \ OF_getprop.9 OF_setprop.9 MLINKS+=OF_node_from_xref.9 OF_xref_from_node.9 MLINKS+=ofw_bus_is_compatible.9 ofw_bus_is_compatible_strict.9 \ ofw_bus_is_compatible.9 ofw_bus_node_is_compatible.9 \ ofw_bus_is_compatible.9 ofw_bus_search_compatible.9 MLINKS+= ofw_bus_status_okay.9 ofw_bus_get_status.9 \ ofw_bus_status_okay.9 ofw_bus_node_status_okay.9 MLINKS+=osd.9 osd_call.9 \ osd.9 osd_del.9 \ osd.9 osd_deregister.9 \ osd.9 osd_exit.9 \ osd.9 osd_get.9 \ osd.9 osd_register.9 \ osd.9 osd_set.9 MLINKS+=panic.9 vpanic.9 MLINKS+=PCBGROUP.9 in_pcbgroup_byhash.9 \ PCBGROUP.9 in_pcbgroup_byinpcb.9 \ PCBGROUP.9 in_pcbgroup_destroy.9 \ PCBGROUP.9 
in_pcbgroup_enabled.9 \ PCBGROUP.9 in_pcbgroup_init.9 \ PCBGROUP.9 in_pcbgroup_remove.9 \ PCBGROUP.9 in_pcbgroup_update.9 \ PCBGROUP.9 in_pcbgroup_update_mbuf.9 \ PCBGROUP.9 in6_pcbgroup_byhash.9 MLINKS+=pci.9 pci_alloc_msi.9 \ pci.9 pci_alloc_msix.9 \ pci.9 pci_disable_busmaster.9 \ pci.9 pci_disable_io.9 \ pci.9 pci_enable_busmaster.9 \ pci.9 pci_enable_io.9 \ pci.9 pci_find_bsf.9 \ pci.9 pci_find_cap.9 \ pci.9 pci_find_dbsf.9 \ pci.9 pci_find_device.9 \ pci.9 pci_find_extcap.9 \ pci.9 pci_find_htcap.9 \ pci.9 pci_find_pcie_root_port.9 \ pci.9 pci_get_id.9 \ pci.9 pci_get_max_read_req.9 \ pci.9 pci_get_powerstate.9 \ pci.9 pci_get_vpd_ident.9 \ pci.9 pci_get_vpd_readonly.9 \ pci.9 pci_iov_attach.9 \ pci.9 pci_iov_attach_name.9 \ pci.9 pci_iov_detach.9 \ pci.9 pci_msi_count.9 \ pci.9 pci_msix_count.9 \ pci.9 pci_msix_pba_bar.9 \ pci.9 pci_msix_table_bar.9 \ pci.9 pci_pending_msix.9 \ pci.9 pci_read_config.9 \ pci.9 pci_release_msi.9 \ pci.9 pci_remap_msix.9 \ pci.9 pci_restore_state.9 \ pci.9 pci_save_state.9 \ pci.9 pci_set_powerstate.9 \ pci.9 pci_set_max_read_req.9 \ pci.9 pci_write_config.9 \ pci.9 pcie_adjust_config.9 \ pci.9 pcie_flr.9 \ pci.9 pcie_max_completion_timeout.9 \ pci.9 pcie_read_config.9 \ pci.9 pcie_wait_for_pending_transactions.9 \ pci.9 pcie_write_config.9 MLINKS+=pci_iov_schema.9 pci_iov_schema_alloc_node.9 \ pci_iov_schema.9 pci_iov_schema_add_bool.9 \ pci_iov_schema.9 pci_iov_schema_add_string.9 \ pci_iov_schema.9 pci_iov_schema_add_uint8.9 \ pci_iov_schema.9 pci_iov_schema_add_uint16.9 \ pci_iov_schema.9 pci_iov_schema_add_uint32.9 \ pci_iov_schema.9 pci_iov_schema_add_uint64.9 \ pci_iov_schema.9 pci_iov_schema_add_unicast_mac.9 MLINKS+=pfil.9 pfil_add_hook.9 \ pfil.9 pfil_head_register.9 \ pfil.9 pfil_head_unregister.9 \ pfil.9 pfil_remove_hook.9 \ pfil.9 pfil_run_hooks.9 \ pfil.9 pfil_link.9 MLINKS+=pfind.9 zpfind.9 MLINKS+=PHOLD.9 PRELE.9 \ PHOLD.9 _PHOLD.9 \ PHOLD.9 _PRELE.9 \ PHOLD.9 PROC_ASSERT_HELD.9 \ PHOLD.9 PROC_ASSERT_NOT_HELD.9 MLINKS+=pmap_copy.9 pmap_copy_page.9 MLINKS+=pmap_extract.9 pmap_extract_and_hold.9 MLINKS+=pmap_init.9 pmap_init2.9 MLINKS+=pmap_is_modified.9 pmap_ts_referenced.9 MLINKS+=pmap_pinit.9 pmap_pinit0.9 \ pmap_pinit.9 pmap_pinit2.9 MLINKS+=pmap_qenter.9 pmap_qremove.9 MLINKS+=pmap_quick_enter_page.9 pmap_quick_remove_page.9 MLINKS+=pmap_remove.9 pmap_remove_all.9 \ pmap_remove.9 pmap_remove_pages.9 MLINKS+=pmap_resident_count.9 pmap_wired_count.9 MLINKS+=pmap_zero_page.9 pmap_zero_area.9 MLINKS+=printf.9 log.9 \ printf.9 tprintf.9 \ printf.9 uprintf.9 MLINKS+=priv.9 priv_check.9 \ priv.9 priv_check_cred.9 MLINKS+=prng.9 prng32.9 \ prng.9 prng32_bounded.9 \ prng.9 prng64.9 \ prng.9 prng64_bounded.9 MLINKS+=proc_rwmem.9 proc_readmem.9 \ proc_rwmem.9 proc_writemem.9 MLINKS+=psignal.9 gsignal.9 \ psignal.9 pgsignal.9 \ psignal.9 tdsignal.9 MLINKS+=pwmbus.9 pwm.9 MLINKS+=random.9 arc4rand.9 \ random.9 arc4random.9 \ random.9 is_random_seeded.9 \ random.9 read_random.9 \ random.9 read_random_uio.9 \ random.9 srandom.9 MLINKS+=random_harvest.9 random_harvest_direct.9 \ random_harvest.9 random_harvest_fast.9 \ random_harvest.9 random_harvest_queue.9 MLINKS+=ratecheck.9 ppsratecheck.9 MLINKS+=refcount.9 refcount_acquire.9 \ refcount.9 refcount_acquire_checked.9 \ refcount.9 refcount_acquire_if_not_zero.9 \ refcount.9 refcount_init.9 \ refcount.9 refcount_load.9 \ refcount.9 refcount_release.9 \ refcount.9 refcount_release_if_last.9 \ refcount.9 refcount_release_if_not_last.9 MLINKS+=resource_int_value.9 resource_long_value.9 \ 
resource_int_value.9 resource_string_value.9 MLINKS+=rman.9 rman_activate_resource.9 \ rman.9 rman_adjust_resource.9 \ rman.9 rman_deactivate_resource.9 \ rman.9 rman_fini.9 \ rman.9 rman_first_free_region.9 \ rman.9 rman_get_bushandle.9 \ rman.9 rman_get_bustag.9 \ rman.9 rman_get_device.9 \ rman.9 rman_get_end.9 \ rman.9 rman_get_flags.9 \ rman.9 rman_get_mapping.9 \ rman.9 rman_get_rid.9 \ rman.9 rman_get_size.9 \ rman.9 rman_get_start.9 \ rman.9 rman_get_virtual.9 \ rman.9 rman_init.9 \ rman.9 rman_init_from_resource.9 \ rman.9 rman_is_region_manager.9 \ rman.9 rman_last_free_region.9 \ rman.9 rman_make_alignment_flags.9 \ rman.9 rman_manage_region.9 \ rman.9 rman_release_resource.9 \ rman.9 rman_reserve_resource.9 \ rman.9 rman_reserve_resource_bound.9 \ rman.9 rman_set_bushandle.9 \ rman.9 rman_set_bustag.9 \ rman.9 rman_set_mapping.9 \ rman.9 rman_set_rid.9 \ rman.9 rman_set_virtual.9 MLINKS+=rmlock.9 rm_assert.9 \ rmlock.9 rm_destroy.9 \ rmlock.9 rm_init.9 \ rmlock.9 rm_init_flags.9 \ rmlock.9 rm_rlock.9 \ rmlock.9 rm_runlock.9 \ rmlock.9 rm_sleep.9 \ rmlock.9 RM_SYSINIT.9 \ rmlock.9 RM_SYSINIT_FLAGS.9 \ rmlock.9 rm_try_rlock.9 \ rmlock.9 rm_wlock.9 \ rmlock.9 rm_wowned.9 \ rmlock.9 rm_wunlock.9 MLINKS+=rtalloc.9 rtalloc1.9 \ rtalloc.9 rtalloc_ign.9 \ rtalloc.9 RT_ADDREF.9 \ rtalloc.9 RT_LOCK.9 \ rtalloc.9 RT_REMREF.9 \ rtalloc.9 RT_RTFREE.9 \ rtalloc.9 RT_UNLOCK.9 \ rtalloc.9 RTFREE_LOCKED.9 \ rtalloc.9 RTFREE.9 \ rtalloc.9 rtfree.9 \ rtalloc.9 rtalloc1_fib.9 \ rtalloc.9 rtalloc_ign_fib.9 \ rtalloc.9 rtalloc_fib.9 MLINKS+=runqueue.9 choosethread.9 \ runqueue.9 procrunnable.9 \ runqueue.9 remrunqueue.9 \ runqueue.9 setrunqueue.9 MLINKS+=rwlock.9 rw_assert.9 \ rwlock.9 rw_destroy.9 \ rwlock.9 rw_downgrade.9 \ rwlock.9 rw_init.9 \ rwlock.9 rw_init_flags.9 \ rwlock.9 rw_initialized.9 \ rwlock.9 rw_rlock.9 \ rwlock.9 rw_runlock.9 \ rwlock.9 rw_unlock.9 \ rwlock.9 rw_sleep.9 \ rwlock.9 RW_SYSINIT.9 \ rwlock.9 RW_SYSINIT_FLAGS.9 \ rwlock.9 rw_try_rlock.9 \ rwlock.9 rw_try_upgrade.9 \ rwlock.9 rw_try_wlock.9 \ rwlock.9 rw_wlock.9 \ rwlock.9 rw_wowned.9 \ rwlock.9 rw_wunlock.9 MLINKS+=sbuf.9 sbuf_bcat.9 \ sbuf.9 sbuf_bcopyin.9 \ sbuf.9 sbuf_bcpy.9 \ sbuf.9 sbuf_cat.9 \ sbuf.9 sbuf_clear.9 \ sbuf.9 sbuf_clear_flags.9 \ sbuf.9 sbuf_copyin.9 \ sbuf.9 sbuf_cpy.9 \ sbuf.9 sbuf_data.9 \ sbuf.9 sbuf_delete.9 \ sbuf.9 sbuf_done.9 \ sbuf.9 sbuf_error.9 \ sbuf.9 sbuf_finish.9 \ sbuf.9 sbuf_get_flags.9 \ sbuf.9 sbuf_hexdump.9 \ sbuf.9 sbuf_len.9 \ sbuf.9 sbuf_new.9 \ sbuf.9 sbuf_new_auto.9 \ sbuf.9 sbuf_new_for_sysctl.9 \ sbuf.9 sbuf_nl_terminate.9 \ sbuf.9 sbuf_printf.9 \ sbuf.9 sbuf_printf_drain.9 \ sbuf.9 sbuf_putbuf.9 \ sbuf.9 sbuf_putc.9 \ sbuf.9 sbuf_set_drain.9 \ sbuf.9 sbuf_set_flags.9 \ sbuf.9 sbuf_setpos.9 \ sbuf.9 sbuf_start_section.9 \ sbuf.9 sbuf_end_section.9 \ sbuf.9 sbuf_trim.9 \ sbuf.9 sbuf_vprintf.9 MLINKS+=scheduler.9 curpriority_cmp.9 \ scheduler.9 maybe_resched.9 \ scheduler.9 propagate_priority.9 \ scheduler.9 resetpriority.9 \ scheduler.9 roundrobin.9 \ scheduler.9 roundrobin_interval.9 \ scheduler.9 schedclock.9 \ scheduler.9 schedcpu.9 \ scheduler.9 sched_setup.9 \ scheduler.9 setrunnable.9 \ scheduler.9 updatepri.9 MLINKS+=SDT.9 SDT_PROVIDER_DECLARE.9 \ SDT.9 SDT_PROVIDER_DEFINE.9 \ SDT.9 SDT_PROBE_DECLARE.9 \ SDT.9 SDT_PROBE_DEFINE.9 \ SDT.9 SDT_PROBE.9 MLINKS+=securelevel_gt.9 securelevel_ge.9 MLINKS+=selrecord.9 seldrain.9 \ selrecord.9 selwakeup.9 MLINKS+=sema.9 sema_destroy.9 \ sema.9 sema_init.9 \ sema.9 sema_post.9 \ sema.9 sema_timedwait.9 \ sema.9 sema_trywait.9 \ 
sema.9 sema_value.9 \ sema.9 sema_wait.9 MLINKS+=seqc.9 seqc_consistent.9 \ seqc.9 seqc_read.9 \ seqc.9 seqc_write_begin.9 \ seqc.9 seqc_write_end.9 MLINKS+=sf_buf.9 sf_buf_alloc.9 \ sf_buf.9 sf_buf_free.9 \ sf_buf.9 sf_buf_kva.9 \ sf_buf.9 sf_buf_page.9 MLINKS+=sglist.9 sglist_alloc.9 \ sglist.9 sglist_append.9 \ sglist.9 sglist_append_bio.9 \ sglist.9 sglist_append_mbuf.9 \ sglist.9 sglist_append_mbuf_epg.9 \ sglist.9 sglist_append_phys.9 \ sglist.9 sglist_append_sglist.9 \ sglist.9 sglist_append_single_mbuf.9 \ sglist.9 sglist_append_uio.9 \ sglist.9 sglist_append_user.9 \ sglist.9 sglist_append_vmpages.9 \ sglist.9 sglist_build.9 \ sglist.9 sglist_clone.9 \ sglist.9 sglist_consume_uio.9 \ sglist.9 sglist_count.9 \ sglist.9 sglist_count_mbuf_epg.9 \ sglist.9 sglist_count_vmpages.9 \ sglist.9 sglist_free.9 \ sglist.9 sglist_hold.9 \ sglist.9 sglist_init.9 \ sglist.9 sglist_join.9 \ sglist.9 sglist_length.9 \ sglist.9 sglist_reset.9 \ sglist.9 sglist_slice.9 \ sglist.9 sglist_split.9 MLINKS+=shm_map.9 shm_unmap.9 MLINKS+=signal.9 cursig.9 \ signal.9 execsigs.9 \ signal.9 issignal.9 \ signal.9 killproc.9 \ signal.9 pgsigio.9 \ signal.9 postsig.9 \ signal.9 SETSETNEQ.9 \ signal.9 SETSETOR.9 \ signal.9 SIGADDSET.9 \ signal.9 SIG_CONTSIGMASK.9 \ signal.9 SIGDELSET.9 \ signal.9 SIGEMPTYSET.9 \ signal.9 sigexit.9 \ signal.9 SIGFILLSET.9 \ signal.9 siginit.9 \ signal.9 SIGISEMPTY.9 \ signal.9 SIGISMEMBER.9 \ signal.9 SIGNOTEMPTY.9 \ signal.9 signotify.9 \ signal.9 SIGPENDING.9 \ signal.9 SIGSETAND.9 \ signal.9 SIGSETCANTMASK.9 \ signal.9 SIGSETEQ.9 \ signal.9 SIGSETNAND.9 \ signal.9 SIG_STOPSIGMASK.9 \ signal.9 trapsignal.9 MLINKS+=sleep.9 msleep.9 \ sleep.9 msleep_sbt.9 \ sleep.9 msleep_spin.9 \ sleep.9 msleep_spin_sbt.9 \ sleep.9 pause.9 \ sleep.9 pause_sig.9 \ sleep.9 pause_sbt.9 \ sleep.9 tsleep.9 \ sleep.9 tsleep_sbt.9 \ sleep.9 wakeup.9 \ sleep.9 wakeup_one.9 \ sleep.9 wakeup_any.9 MLINKS+=sleepqueue.9 init_sleepqueues.9 \ sleepqueue.9 sleepq_abort.9 \ sleepqueue.9 sleepq_add.9 \ sleepqueue.9 sleepq_alloc.9 \ sleepqueue.9 sleepq_broadcast.9 \ sleepqueue.9 sleepq_free.9 \ sleepqueue.9 sleepq_lookup.9 \ sleepqueue.9 sleepq_lock.9 \ sleepqueue.9 sleepq_release.9 \ sleepqueue.9 sleepq_remove.9 \ sleepqueue.9 sleepq_set_timeout.9 \ sleepqueue.9 sleepq_set_timeout_sbt.9 \ sleepqueue.9 sleepq_signal.9 \ sleepqueue.9 sleepq_sleepcnt.9 \ sleepqueue.9 sleepq_timedwait.9 \ sleepqueue.9 sleepq_timedwait_sig.9 \ sleepqueue.9 sleepq_type.9 \ sleepqueue.9 sleepq_wait.9 \ sleepqueue.9 sleepq_wait_sig.9 MLINKS+=socket.9 soabort.9 \ socket.9 soaccept.9 \ socket.9 sobind.9 \ socket.9 socheckuid.9 \ socket.9 soclose.9 \ socket.9 soconnect.9 \ socket.9 socreate.9 \ socket.9 sodisconnect.9 \ socket.9 sodtor_set.9 \ socket.9 sodupsockaddr.9 \ socket.9 sofree.9 \ socket.9 sogetopt.9 \ socket.9 sohasoutofband.9 \ socket.9 solisten.9 \ socket.9 solisten_proto.9 \ socket.9 solisten_proto_check.9 \ socket.9 sonewconn.9 \ socket.9 sooptcopyin.9 \ socket.9 sooptcopyout.9 \ socket.9 sopoll.9 \ socket.9 sopoll_generic.9 \ socket.9 soreceive.9 \ socket.9 soreceive_dgram.9 \ socket.9 soreceive_generic.9 \ socket.9 soreceive_stream.9 \ socket.9 soreserve.9 \ socket.9 sorflush.9 \ socket.9 sosend.9 \ socket.9 sosend_dgram.9 \ socket.9 sosend_generic.9 \ socket.9 sosetopt.9 \ socket.9 soshutdown.9 \ socket.9 sotoxsocket.9 \ socket.9 soupcall_clear.9 \ socket.9 soupcall_set.9 \ socket.9 sowakeup.9 MLINKS+=stack.9 stack_copy.9 \ stack.9 stack_create.9 \ stack.9 stack_destroy.9 \ stack.9 stack_print.9 \ stack.9 stack_print_ddb.9 
\ stack.9 stack_print_short.9 \ stack.9 stack_print_short_ddb.9 \ stack.9 stack_put.9 \ stack.9 stack_save.9 \ stack.9 stack_sbuf_print.9 \ stack.9 stack_sbuf_print_ddb.9 \ stack.9 stack_zero.9 MLINKS+=store.9 subyte.9 \ store.9 suword.9 \ store.9 suword16.9 \ store.9 suword32.9 \ store.9 suword64.9 MLINKS+=swi.9 swi_add.9 \ swi.9 swi_remove.9 \ swi.9 swi_sched.9 MLINKS+=sx.9 sx_assert.9 \ sx.9 sx_destroy.9 \ sx.9 sx_downgrade.9 \ sx.9 sx_init.9 \ sx.9 sx_init_flags.9 \ sx.9 sx_sleep.9 \ sx.9 sx_slock.9 \ sx.9 sx_slock_sig.9 \ sx.9 sx_sunlock.9 \ sx.9 SX_SYSINIT.9 \ sx.9 SX_SYSINIT_FLAGS.9 \ sx.9 sx_try_slock.9 \ sx.9 sx_try_upgrade.9 \ sx.9 sx_try_xlock.9 \ sx.9 sx_unlock.9 \ sx.9 sx_xholder.9 \ sx.9 sx_xlock.9 \ sx.9 sx_xlock_sig.9 \ sx.9 sx_xlocked.9 \ sx.9 sx_xunlock.9 MLINKS+=syscall_helper_register.9 syscall_helper_unregister.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER_COMPAT.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER_COMPAT_F.9 \ syscall_helper_register.9 SYSCALL_INIT_HELPER_F.9 MLINKS+=sysctl.9 SYSCTL_DECL.9 \ sysctl.9 SYSCTL_ADD_INT.9 \ sysctl.9 SYSCTL_ADD_LONG.9 \ sysctl.9 SYSCTL_ADD_NODE.9 \ sysctl.9 SYSCTL_ADD_NODE_WITH_LABEL.9 \ sysctl.9 SYSCTL_ADD_OPAQUE.9 \ sysctl.9 SYSCTL_ADD_PROC.9 \ sysctl.9 SYSCTL_ADD_QUAD.9 \ sysctl.9 SYSCTL_ADD_ROOT_NODE.9 \ sysctl.9 SYSCTL_ADD_S8.9 \ sysctl.9 SYSCTL_ADD_S16.9 \ sysctl.9 SYSCTL_ADD_S32.9 \ sysctl.9 SYSCTL_ADD_S64.9 \ sysctl.9 SYSCTL_ADD_STRING.9 \ sysctl.9 SYSCTL_ADD_STRUCT.9 \ sysctl.9 SYSCTL_ADD_TIMEVAL_SEC.9 \ sysctl.9 SYSCTL_ADD_U8.9 \ sysctl.9 SYSCTL_ADD_U16.9 \ sysctl.9 SYSCTL_ADD_U32.9 \ sysctl.9 SYSCTL_ADD_U64.9 \ sysctl.9 SYSCTL_ADD_UAUTO.9 \ sysctl.9 SYSCTL_ADD_UINT.9 \ sysctl.9 SYSCTL_ADD_ULONG.9 \ sysctl.9 SYSCTL_ADD_UMA_CUR.9 \ sysctl.9 SYSCTL_ADD_UMA_MAX.9 \ sysctl.9 SYSCTL_ADD_UQUAD.9 \ sysctl.9 SYSCTL_CHILDREN.9 \ sysctl.9 SYSCTL_STATIC_CHILDREN.9 \ sysctl.9 SYSCTL_NODE_CHILDREN.9 \ sysctl.9 SYSCTL_PARENT.9 \ sysctl.9 SYSCTL_INT.9 \ sysctl.9 SYSCTL_INT_WITH_LABEL.9 \ sysctl.9 SYSCTL_LONG.9 \ sysctl.9 sysctl_msec_to_ticks.9 \ sysctl.9 SYSCTL_NODE.9 \ sysctl.9 SYSCTL_NODE_WITH_LABEL.9 \ sysctl.9 SYSCTL_OPAQUE.9 \ sysctl.9 SYSCTL_PROC.9 \ sysctl.9 SYSCTL_QUAD.9 \ sysctl.9 SYSCTL_ROOT_NODE.9 \ sysctl.9 SYSCTL_S8.9 \ sysctl.9 SYSCTL_S16.9 \ sysctl.9 SYSCTL_S32.9 \ sysctl.9 SYSCTL_S64.9 \ sysctl.9 SYSCTL_STRING.9 \ sysctl.9 SYSCTL_STRUCT.9 \ sysctl.9 SYSCTL_TIMEVAL_SEC.9 \ sysctl.9 SYSCTL_U8.9 \ sysctl.9 SYSCTL_U16.9 \ sysctl.9 SYSCTL_U32.9 \ sysctl.9 SYSCTL_U64.9 \ sysctl.9 SYSCTL_UINT.9 \ sysctl.9 SYSCTL_ULONG.9 \ sysctl.9 SYSCTL_UMA_CUR.9 \ sysctl.9 SYSCTL_UMA_MAX.9 \ sysctl.9 SYSCTL_UQUAD.9 MLINKS+=sysctl_add_oid.9 sysctl_move_oid.9 \ sysctl_add_oid.9 sysctl_remove_oid.9 \ sysctl_add_oid.9 sysctl_remove_name.9 MLINKS+=sysctl_ctx_init.9 sysctl_ctx_entry_add.9 \ sysctl_ctx_init.9 sysctl_ctx_entry_del.9 \ sysctl_ctx_init.9 sysctl_ctx_entry_find.9 \ sysctl_ctx_init.9 sysctl_ctx_free.9 MLINKS+=SYSINIT.9 SYSUNINIT.9 MLINKS+=taskqueue.9 TASK_INIT.9 \ taskqueue.9 TASK_INITIALIZER.9 \ taskqueue.9 taskqueue_block.9 \ taskqueue.9 taskqueue_cancel.9 \ taskqueue.9 taskqueue_cancel_timeout.9 \ taskqueue.9 taskqueue_create.9 \ taskqueue.9 taskqueue_create_fast.9 \ taskqueue.9 TASKQUEUE_DECLARE.9 \ taskqueue.9 TASKQUEUE_DEFINE.9 \ taskqueue.9 TASKQUEUE_DEFINE_THREAD.9 \ taskqueue.9 taskqueue_drain.9 \ taskqueue.9 taskqueue_drain_all.9 \ taskqueue.9 taskqueue_drain_timeout.9 \ taskqueue.9 taskqueue_enqueue.9 \ taskqueue.9 taskqueue_enqueue_timeout.9 \ taskqueue.9 
TASKQUEUE_FAST_DEFINE.9 \ taskqueue.9 TASKQUEUE_FAST_DEFINE_THREAD.9 \ taskqueue.9 taskqueue_free.9 \ taskqueue.9 taskqueue_member.9 \ taskqueue.9 taskqueue_quiesce.9 \ taskqueue.9 taskqueue_run.9 \ taskqueue.9 taskqueue_set_callback.9 \ taskqueue.9 taskqueue_start_threads.9 \ taskqueue.9 taskqueue_start_threads_cpuset.9 \ taskqueue.9 taskqueue_start_threads_in_proc.9 \ taskqueue.9 taskqueue_unblock.9 \ taskqueue.9 TIMEOUT_TASK_INIT.9 MLINKS+=tcp_functions.9 register_tcp_functions.9 \ tcp_functions.9 register_tcp_functions_as_name.9 \ tcp_functions.9 register_tcp_functions_as_names.9 \ tcp_functions.9 deregister_tcp_functions.9 MLINKS+=time.9 boottime.9 \ time.9 time_second.9 \ time.9 time_uptime.9 MLINKS+=ucred.9 crcopy.9 \ ucred.9 crcopysafe.9 \ ucred.9 crdup.9 \ ucred.9 crfree.9 \ ucred.9 crget.9 \ ucred.9 crhold.9 \ ucred.9 crsetgroups.9 \ ucred.9 cru2x.9 MLINKS+=uidinfo.9 uifind.9 \ uidinfo.9 uifree.9 \ uidinfo.9 uihashinit.9 \ uidinfo.9 uihold.9 MLINKS+=uio.9 uiomove.9 \ uio.9 uiomove_frombuf.9 \ uio.9 uiomove_nofault.9 MLINKS+=unr.9 alloc_unr.9 \ unr.9 alloc_unrl.9 \ unr.9 alloc_unr_specific.9 \ unr.9 clear_unrhdr.9 \ unr.9 delete_unrhdr.9 \ unr.9 free_unr.9 \ unr.9 new_unrhdr.9 .if ${MK_USB} != "no" MAN+= usbdi.9 MLINKS+=usbdi.9 usbd_do_request.9 \ usbdi.9 usbd_do_request_flags.9 \ usbdi.9 usbd_errstr.9 \ usbdi.9 usbd_lookup_id_by_info.9 \ usbdi.9 usbd_lookup_id_by_uaa.9 \ usbdi.9 usbd_transfer_clear_stall.9 \ usbdi.9 usbd_transfer_drain.9 \ usbdi.9 usbd_transfer_pending.9 \ usbdi.9 usbd_transfer_poll.9 \ usbdi.9 usbd_transfer_setup.9 \ usbdi.9 usbd_transfer_start.9 \ usbdi.9 usbd_transfer_stop.9 \ usbdi.9 usbd_transfer_submit.9 \ usbdi.9 usbd_transfer_unsetup.9 \ usbdi.9 usbd_xfer_clr_flag.9 \ usbdi.9 usbd_xfer_frame_data.9 \ usbdi.9 usbd_xfer_frame_len.9 \ usbdi.9 usbd_xfer_get_frame.9 \ usbdi.9 usbd_xfer_get_priv.9 \ usbdi.9 usbd_xfer_is_stalled.9 \ usbdi.9 usbd_xfer_max_framelen.9 \ usbdi.9 usbd_xfer_max_frames.9 \ usbdi.9 usbd_xfer_max_len.9 \ usbdi.9 usbd_xfer_set_flag.9 \ usbdi.9 usbd_xfer_set_frame_data.9 \ usbdi.9 usbd_xfer_set_frame_len.9 \ usbdi.9 usbd_xfer_set_frame_offset.9 \ usbdi.9 usbd_xfer_set_frames.9 \ usbdi.9 usbd_xfer_set_interval.9 \ usbdi.9 usbd_xfer_set_priv.9 \ usbdi.9 usbd_xfer_set_stall.9 \ usbdi.9 usbd_xfer_set_timeout.9 \ usbdi.9 usbd_xfer_softc.9 \ usbdi.9 usbd_xfer_state.9 \ usbdi.9 usbd_xfer_status.9 \ usbdi.9 usb_fifo_alloc_buffer.9 \ usbdi.9 usb_fifo_attach.9 \ usbdi.9 usb_fifo_detach.9 \ usbdi.9 usb_fifo_free_buffer.9 \ usbdi.9 usb_fifo_get_data.9 \ usbdi.9 usb_fifo_get_data_buffer.9 \ usbdi.9 usb_fifo_get_data_error.9 \ usbdi.9 usb_fifo_get_data_linear.9 \ usbdi.9 usb_fifo_put_bytes_max.9 \ usbdi.9 usb_fifo_put_data.9 \ usbdi.9 usb_fifo_put_data_buffer.9 \ usbdi.9 usb_fifo_put_data_error.9 \ usbdi.9 usb_fifo_put_data_linear.9 \ usbdi.9 usb_fifo_reset.9 \ usbdi.9 usb_fifo_softc.9 \ usbdi.9 usb_fifo_wakeup.9 .endif MLINKS+=vcount.9 count_dev.9 MLINKS+=vfsconf.9 vfs_modevent.9 \ vfsconf.9 vfs_register.9 \ vfsconf.9 vfs_unregister.9 MLINKS+=vfs_getopt.9 vfs_copyopt.9 \ vfs_getopt.9 vfs_filteropt.9 \ vfs_getopt.9 vfs_flagopt.9 \ vfs_getopt.9 vfs_getopts.9 \ vfs_getopt.9 vfs_scanopt.9 \ vfs_getopt.9 vfs_setopt.9 \ vfs_getopt.9 vfs_setopt_part.9 \ vfs_getopt.9 vfs_setopts.9 MLINKS+=vhold.9 vdrop.9 \ vhold.9 vdropl.9 \ vhold.9 vholdl.9 MLINKS+=vmem.9 vmem_add.9 \ vmem.9 vmem_alloc.9 \ vmem.9 vmem_create.9 \ vmem.9 vmem_destroy.9 \ vmem.9 vmem_free.9 \ vmem.9 vmem_xalloc.9 \ vmem.9 vmem_xfree.9 MLINKS+=vm_map_lock.9 vm_map_lock_downgrade.9 \ vm_map_lock.9 
vm_map_lock_read.9 \ vm_map_lock.9 vm_map_lock_upgrade.9 \ vm_map_lock.9 vm_map_trylock.9 \ vm_map_lock.9 vm_map_trylock_read.9 \ vm_map_lock.9 vm_map_unlock.9 \ vm_map_lock.9 vm_map_unlock_read.9 MLINKS+=vm_map_lookup.9 vm_map_lookup_done.9 MLINKS+=vm_map_max.9 vm_map_min.9 \ vm_map_max.9 vm_map_pmap.9 MLINKS+=vm_map_stack.9 vm_map_growstack.9 MLINKS+=vm_map_wire.9 vm_map_wire_mapped.9 \ vm_page_wire.9 vm_page_unwire.9 \ vm_page_wire.9 vm_page_unwire_noq.9 MLINKS+=vm_page_bits.9 vm_page_clear_dirty.9 \ vm_page_bits.9 vm_page_dirty.9 \ vm_page_bits.9 vm_page_is_valid.9 \ vm_page_bits.9 vm_page_set_invalid.9 \ vm_page_bits.9 vm_page_set_validclean.9 \ vm_page_bits.9 vm_page_test_dirty.9 \ vm_page_bits.9 vm_page_undirty.9 \ vm_page_bits.9 vm_page_zero_invalid.9 MLINKS+=vm_page_busy.9 vm_page_busied.9 \ vm_page_busy.9 vm_page_busy_downgrade.9 \ vm_page_busy.9 vm_page_busy_sleep.9 \ vm_page_busy.9 vm_page_sbusied.9 \ vm_page_busy.9 vm_page_sbusy.9 \ vm_page_busy.9 vm_page_sleep_if_busy.9 \ vm_page_busy.9 vm_page_sunbusy.9 \ vm_page_busy.9 vm_page_trysbusy.9 \ vm_page_busy.9 vm_page_tryxbusy.9 \ vm_page_busy.9 vm_page_xbusied.9 \ vm_page_busy.9 vm_page_xbusy.9 \ vm_page_busy.9 vm_page_xunbusy.9 \ vm_page_busy.9 vm_page_assert_sbusied.9 \ vm_page_busy.9 vm_page_assert_unbusied.9 \ vm_page_busy.9 vm_page_assert_xbusied.9 MLINKS+=vm_page_aflag.9 vm_page_aflag_clear.9 \ vm_page_aflag.9 vm_page_aflag_set.9 \ vm_page_aflag.9 vm_page_reference.9 MLINKS+=vm_page_free.9 vm_page_free_toq.9 \ vm_page_free.9 vm_page_free_zero.9 \ vm_page_free.9 vm_page_try_to_free.9 MLINKS+=vm_page_insert.9 vm_page_remove.9 MLINKS+=vm_page_wire.9 vm_page_unwire.9 MLINKS+=VOP_ACCESS.9 VOP_ACCESSX.9 MLINKS+=VOP_ATTRIB.9 VOP_GETATTR.9 \ VOP_ATTRIB.9 VOP_SETATTR.9 \ VOP_ATTRIB.9 VOP_STAT.9 MLINKS+=VOP_CREATE.9 VOP_MKDIR.9 \ VOP_CREATE.9 VOP_MKNOD.9 \ VOP_CREATE.9 VOP_SYMLINK.9 MLINKS+=VOP_FSYNC.9 VOP_FDATASYNC.9 MLINKS+=VOP_GETPAGES.9 VOP_PUTPAGES.9 MLINKS+=VOP_INACTIVE.9 VOP_RECLAIM.9 MLINKS+=VOP_LOCK.9 vn_lock.9 \ VOP_LOCK.9 VOP_ISLOCKED.9 \ VOP_LOCK.9 VOP_UNLOCK.9 MLINKS+=VOP_OPENCLOSE.9 VOP_CLOSE.9 \ VOP_OPENCLOSE.9 VOP_OPEN.9 MLINKS+=VOP_RDWR.9 VOP_READ.9 \ VOP_RDWR.9 VOP_WRITE.9 MLINKS+=VOP_REMOVE.9 VOP_RMDIR.9 MLINKS+=vnet.9 vimage.9 MLINKS+=vref.9 VREF.9 \ vref.9 vrefl.9 MLINKS+=vrele.9 vput.9 \ vrele.9 vunref.9 MLINKS+=vslock.9 vsunlock.9 MLINKS+=zone.9 uma.9 \ zone.9 uma_prealloc.9 \ zone.9 uma_reclaim.9 \ zone.9 uma_zalloc.9 \ zone.9 uma_zalloc_arg.9 \ zone.9 uma_zalloc_domain.9 \ zone.9 uma_zalloc_pcpu.9 \ zone.9 uma_zalloc_pcpu_arg.9 \ zone.9 uma_zcache_create.9 \ zone.9 uma_zcreate.9 \ zone.9 uma_zdestroy.9 \ zone.9 uma_zfree.9 \ zone.9 uma_zfree_arg.9 \ zone.9 uma_zfree_pcpu.9 \ zone.9 uma_zfree_pcpu_arg.9 \ zone.9 uma_zone_get_cur.9 \ zone.9 uma_zone_get_max.9 \ zone.9 uma_zone_reclaim.9 \ zone.9 uma_zone_reserve.9 \ zone.9 uma_zone_reserve_kva.9 \ zone.9 uma_zone_set_allocf.9 \ zone.9 uma_zone_set_freef.9 \ zone.9 uma_zone_set_max.9 \ zone.9 uma_zone_set_maxaction.9 \ zone.9 uma_zone_set_maxcache.9 \ zone.9 uma_zone_set_warning.9 \ zone.9 uma_zsecond_create.9 .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386" _superio.9= superio.9 MLINKS+=superio.9 superio_devid.9 \ superio.9 superio_dev_disable.9 \ superio.9 superio_dev_enable.9 \ superio.9 superio_dev_enabled.9 \ superio.9 superio_find_dev.9 \ superio.9 superio_get_dma.9 \ superio.9 superio_get_iobase.9 \ superio.9 superio_get_irq.9 \ superio.9 superio_get_ldn.9 \ superio.9 superio_get_type.9 \ superio.9
superio_read.9 \ superio.9 superio_revid.9 \ superio.9 superio_vendor.9 \ superio.9 superio_write.9 .endif .include diff --git a/sys/arm/allwinner/a10_ahci.c b/sys/arm/allwinner/a10_ahci.c index 399f17b079ef..5e6f53291394 100644 --- a/sys/arm/allwinner/a10_ahci.c +++ b/sys/arm/allwinner/a10_ahci.c @@ -1,426 +1,426 @@ /*- * Copyright (c) 2015 Luiz Otavio O Souza All rights reserved. * Copyright (c) 2014-2015 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The magic-bit-bang sequence used in this code may be based on a linux * platform driver in the Allwinner SDK from Allwinner Technology Co., Ltd. * www.allwinnertech.com, by Daniel Wang * though none of the original code was copied. */ #include "opt_bus.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include /* * Allwinner a1x/a2x/a8x SATA attachment. This is just the AHCI register * set with a few extra implementation-specific registers that need to * be accounted for. There's only one PHY in the system, and it needs * to be trained to bring the link up. In addition, there's some DMA * specific things that need to be done as well. These things are also * just about completely undocumented, except in ugly code in the Linux * SDK Allwinner releases. 
*/ /* BITx -- Unknown bit that needs to be set/cleared at position x */ /* UFx -- Unknown multi-bit field frobbed during init */ #define AHCI_BISTAFR 0x00A0 #define AHCI_BISTCR 0x00A4 #define AHCI_BISTFCTR 0x00A8 #define AHCI_BISTSR 0x00AC #define AHCI_BISTDECR 0x00B0 #define AHCI_DIAGNR 0x00B4 #define AHCI_DIAGNR1 0x00B8 #define AHCI_OOBR 0x00BC #define AHCI_PHYCS0R 0x00C0 /* Bits 0..17 are a mystery */ #define PHYCS0R_BIT18 (1 << 18) #define PHYCS0R_POWER_ENABLE (1 << 19) #define PHYCS0R_UF1_MASK (7 << 20) /* Unknown Field 1 */ #define PHYCS0R_UF1_INIT (3 << 20) #define PHYCS0R_BIT23 (1 << 23) #define PHYCS0R_UF2_MASK (7 << 24) /* Unknown Field 2 */ #define PHYCS0R_UF2_INIT (5 << 24) /* Bit 27 mystery */ #define PHYCS0R_POWER_STATUS_MASK (7 << 28) #define PHYCS0R_PS_GOOD (2 << 28) /* Bit 31 mystery */ #define AHCI_PHYCS1R 0x00C4 /* Bits 0..5 are a mystery */ #define PHYCS1R_UF1_MASK (3 << 6) #define PHYCS1R_UF1_INIT (2 << 6) #define PHYCS1R_UF2_MASK (0x1f << 8) #define PHYCS1R_UF2_INIT (6 << 8) /* Bits 13..14 are a mystery */ #define PHYCS1R_BIT15 (1 << 15) #define PHYCS1R_UF3_MASK (3 << 16) #define PHYCS1R_UF3_INIT (2 << 16) /* Bit 18 mystery */ #define PHYCS1R_HIGHZ (1 << 19) /* Bits 20..27 mystery */ #define PHYCS1R_BIT28 (1 << 28) /* Bits 29..31 mystery */ #define AHCI_PHYCS2R 0x00C8 /* bits 0..4 mystery */ #define PHYCS2R_UF1_MASK (0x1f << 5) #define PHYCS2R_UF1_INIT (0x19 << 5) /* Bits 10..23 mystery */ #define PHYCS2R_CALIBRATE (1 << 24) /* Bits 25..31 mystery */ #define AHCI_TIMER1MS 0x00E0 #define AHCI_GPARAM1R 0x00E8 #define AHCI_GPARAM2R 0x00EC #define AHCI_PPARAMR 0x00F0 #define AHCI_TESTR 0x00F4 #define AHCI_VERSIONR 0x00F8 #define AHCI_IDR 0x00FC #define AHCI_RWCR 0x00FC #define AHCI_P0DMACR 0x0070 #define AHCI_P0PHYCR 0x0078 #define AHCI_P0PHYSR 0x007C #define PLL_FREQ 100000000 struct ahci_a10_softc { struct ahci_controller ahci_ctlr; regulator_t ahci_reg; clk_t clk_pll; clk_t clk_gate; }; static void inline ahci_set(struct resource *m, bus_size_t off, uint32_t set) { uint32_t val = ATA_INL(m, off); val |= set; ATA_OUTL(m, off, val); } static void inline ahci_clr(struct resource *m, bus_size_t off, uint32_t clr) { uint32_t val = ATA_INL(m, off); val &= ~clr; ATA_OUTL(m, off, val); } static void inline ahci_mask_set(struct resource *m, bus_size_t off, uint32_t mask, uint32_t set) { uint32_t val = ATA_INL(m, off); val &= mask; val |= set; ATA_OUTL(m, off, val); } /* * Should this be phy_reset or phy_init */ #define PHY_RESET_TIMEOUT 1000 static void ahci_a10_phy_reset(device_t dev) { uint32_t to, val; struct ahci_controller *ctlr = device_get_softc(dev); /* * Here starts the magic -- most of the comments are based * on guesswork, names of routines and printf error * messages. The code works, but it will do that even if the * comments are 100% BS. */ /* * Lock out other access while we initialize. Or at least that * seems to be the case based on Linux SDK #defines. Maybe this * put things into reset? */ ATA_OUTL(ctlr->r_mem, AHCI_RWCR, 0); DELAY(100); /* * Set bit 19 in PHYCS1R. Guessing this disables driving the PHY * port for a bit while we reset things. */ ahci_set(ctlr->r_mem, AHCI_PHYCS1R, PHYCS1R_HIGHZ); /* * Frob PHYCS0R... */ ahci_mask_set(ctlr->r_mem, AHCI_PHYCS0R, ~PHYCS0R_UF2_MASK, PHYCS0R_UF2_INIT | PHYCS0R_BIT23 | PHYCS0R_BIT18); /* * Set three fields in PHYCS1R */ ahci_mask_set(ctlr->r_mem, AHCI_PHYCS1R, ~(PHYCS1R_UF1_MASK | PHYCS1R_UF2_MASK | PHYCS1R_UF3_MASK), PHYCS1R_UF1_INIT | PHYCS1R_UF2_INIT | PHYCS1R_UF3_INIT); /* * Two more mystery bits in PHYCS1R.
-- can these be combined above? */ ahci_set(ctlr->r_mem, AHCI_PHYCS1R, PHYCS1R_BIT15 | PHYCS1R_BIT28); /* * Now clear that first mystery bit. Perhaps this starts * driving the PHY again so we can power it up and start * talking to the SATA drive, if any, below. */ ahci_clr(ctlr->r_mem, AHCI_PHYCS1R, PHYCS1R_HIGHZ); /* * Frob PHYCS0R again... */ ahci_mask_set(ctlr->r_mem, AHCI_PHYCS0R, ~PHYCS0R_UF1_MASK, PHYCS0R_UF1_INIT); /* * Frob PHYCS2R, because 25 means something? */ ahci_mask_set(ctlr->r_mem, AHCI_PHYCS2R, ~PHYCS2R_UF1_MASK, PHYCS2R_UF1_INIT); DELAY(100); /* WAG */ /* * Turn on the power to the PHY and wait for it to report back * good? */ ahci_set(ctlr->r_mem, AHCI_PHYCS0R, PHYCS0R_POWER_ENABLE); for (to = PHY_RESET_TIMEOUT; to > 0; to--) { val = ATA_INL(ctlr->r_mem, AHCI_PHYCS0R); if ((val & PHYCS0R_POWER_STATUS_MASK) == PHYCS0R_PS_GOOD) break; DELAY(10); } if (to == 0 && bootverbose) device_printf(dev, "PHY Power Failed PHYCS0R = %#x\n", val); /* * Calibrate the clocks between the device and the host. This appears * to be an automated process that clears the bit when it is done. */ ahci_set(ctlr->r_mem, AHCI_PHYCS2R, PHYCS2R_CALIBRATE); for (to = PHY_RESET_TIMEOUT; to > 0; to--) { val = ATA_INL(ctlr->r_mem, AHCI_PHYCS2R); if ((val & PHYCS2R_CALIBRATE) == 0) break; DELAY(10); } if (to == 0 && bootverbose) device_printf(dev, "PHY Cal Failed PHYCS2R %#x\n", val); /* * OK, let things settle down a bit. */ DELAY(1000); /* * Go back into normal mode now that we've calibrated the PHY. */ ATA_OUTL(ctlr->r_mem, AHCI_RWCR, 7); } static void ahci_a10_ch_start(struct ahci_channel *ch) { uint32_t reg; /* * Magical values from Allwinner SDK, set up the DMA before starting * operations on this channel. */ reg = ATA_INL(ch->r_mem, AHCI_P0DMACR); reg &= ~0xff00; reg |= 0x4400; ATA_OUTL(ch->r_mem, AHCI_P0DMACR, reg); } static int ahci_a10_ctlr_reset(device_t dev) { ahci_a10_phy_reset(dev); return (ahci_ctlr_reset(dev)); } static int ahci_a10_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "allwinner,sun4i-a10-ahci")) return (ENXIO); device_set_desc(dev, "Allwinner Integrated AHCI controller"); return (BUS_PROBE_DEFAULT); } static int ahci_a10_attach(device_t dev) { int error; struct ahci_a10_softc *sc; struct ahci_controller *ctlr; sc = device_get_softc(dev); ctlr = &sc->ahci_ctlr; ctlr->quirks = AHCI_Q_NOPMP; ctlr->vendorid = 0; ctlr->deviceid = 0; ctlr->subvendorid = 0; ctlr->subdeviceid = 0; ctlr->r_rid = 0; if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE))) return (ENXIO); /* Enable the (optional) regulator */ if (regulator_get_by_ofw_property(dev, 0, "target-supply", &sc->ahci_reg) == 0) { error = regulator_enable(sc->ahci_reg); if (error != 0) { device_printf(dev, "Could not enable regulator\n"); goto fail; } } /* Enable clocks */ error = clk_get_by_ofw_index(dev, 0, 0, &sc->clk_gate); if (error != 0) { device_printf(dev, "Cannot get gate clock\n"); goto fail; } error = clk_get_by_ofw_index(dev, 0, 1, &sc->clk_pll); if (error != 0) { device_printf(dev, "Cannot get PLL clock\n"); goto fail; } error = clk_set_freq(sc->clk_pll, PLL_FREQ, CLK_SET_ROUND_DOWN); if (error != 0) { device_printf(dev, "Cannot set PLL frequency\n"); goto fail; } error = clk_enable(sc->clk_pll); if (error != 0) { device_printf(dev, "Cannot enable PLL\n"); goto fail; } error = clk_enable(sc->clk_gate); if (error != 0) { device_printf(dev, "Cannot enable clk gate\n"); goto fail; } /* Reset controller */ if ((error = ahci_a10_ctlr_reset(dev)) != 0) goto fail; /* * No MSI registers on
this platform. */ ctlr->msi = 0; ctlr->numirqs = 1; /* Channel start callback(). */ ctlr->ch_start = ahci_a10_ch_start; /* * Note: ahci_attach will release ctlr->r_mem on errors automatically */ return (ahci_attach(dev)); fail: if (sc->ahci_reg != NULL) regulator_disable(sc->ahci_reg); if (sc->clk_gate != NULL) clk_release(sc->clk_gate); if (sc->clk_pll != NULL) clk_release(sc->clk_pll); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (error); } static int ahci_a10_detach(device_t dev) { struct ahci_a10_softc *sc; struct ahci_controller *ctlr; sc = device_get_softc(dev); ctlr = &sc->ahci_ctlr; if (sc->ahci_reg != NULL) regulator_disable(sc->ahci_reg); if (sc->clk_gate != NULL) clk_release(sc->clk_gate); if (sc->clk_pll != NULL) clk_release(sc->clk_pll); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (ahci_detach(dev)); } static device_method_t ahci_ata_methods[] = { DEVMETHOD(device_probe, ahci_a10_probe), DEVMETHOD(device_attach, ahci_a10_attach), DEVMETHOD(device_detach, ahci_a10_detach), DEVMETHOD(bus_print_child, ahci_print_child), DEVMETHOD(bus_alloc_resource, ahci_alloc_resource), DEVMETHOD(bus_release_resource, ahci_release_resource), DEVMETHOD(bus_setup_intr, ahci_setup_intr), DEVMETHOD(bus_teardown_intr,ahci_teardown_intr), - DEVMETHOD(bus_child_location_str, ahci_child_location_str), + DEVMETHOD(bus_child_location, ahci_child_location), DEVMETHOD_END }; static driver_t ahci_ata_driver = { "ahci", ahci_ata_methods, sizeof(struct ahci_a10_softc) }; DRIVER_MODULE(a10_ahci, simplebus, ahci_ata_driver, ahci_devclass, 0, 0); diff --git a/sys/arm/freescale/imx/imx6_ahci.c b/sys/arm/freescale/imx/imx6_ahci.c index 8b49c71c34a0..457966d9b219 100644 --- a/sys/arm/freescale/imx/imx6_ahci.c +++ b/sys/arm/freescale/imx/imx6_ahci.c @@ -1,363 +1,363 @@ /*- * Copyright (c) 2017 Rogiel Sulzbach * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #define SATA_TIMER1MS 0x000000e0 #define SATA_P0PHYCR 0x00000178 #define SATA_P0PHYCR_CR_READ (1 << 19) #define SATA_P0PHYCR_CR_WRITE (1 << 18) #define SATA_P0PHYCR_CR_CAP_DATA (1 << 17) #define SATA_P0PHYCR_CR_CAP_ADDR (1 << 16) #define SATA_P0PHYCR_CR_DATA_IN(v) ((v) & 0xffff) #define SATA_P0PHYSR 0x0000017c #define SATA_P0PHYSR_CR_ACK (1 << 18) #define SATA_P0PHYSR_CR_DATA_OUT(v) ((v) & 0xffff) /* phy registers */ #define SATA_PHY_CLOCK_RESET 0x7f3f #define SATA_PHY_CLOCK_RESET_RST (1 << 0) #define SATA_PHY_LANE0_OUT_STAT 0x2003 #define SATA_PHY_LANE0_OUT_STAT_RX_PLL_STATE (1 << 1) static struct ofw_compat_data compat_data[] = { {"fsl,imx6q-ahci", true}, {NULL, false} }; static int imx6_ahci_phy_ctrl(struct ahci_controller* sc, uint32_t bitmask, bool on) { uint32_t v; int timeout; bool state; v = ATA_INL(sc->r_mem, SATA_P0PHYCR); if (on) { v |= bitmask; } else { v &= ~bitmask; } ATA_OUTL(sc->r_mem, SATA_P0PHYCR, v); for (timeout = 5000; timeout > 0; --timeout) { v = ATA_INL(sc->r_mem, SATA_P0PHYSR); state = (v & SATA_P0PHYSR_CR_ACK) == SATA_P0PHYSR_CR_ACK; if(state == on) { break; } DELAY(100); } if (timeout > 0) { return (0); } return (ETIMEDOUT); } static int imx6_ahci_phy_addr(struct ahci_controller* sc, uint32_t addr) { int error; DELAY(100); ATA_OUTL(sc->r_mem, SATA_P0PHYCR, addr); error = imx6_ahci_phy_ctrl(sc, SATA_P0PHYCR_CR_CAP_ADDR, true); if (error != 0) { device_printf(sc->dev, "%s: timeout on SATA_P0PHYCR_CR_CAP_ADDR=1\n", __FUNCTION__); return (error); } error = imx6_ahci_phy_ctrl(sc, SATA_P0PHYCR_CR_CAP_ADDR, false); if (error != 0) { device_printf(sc->dev, "%s: timeout on SATA_P0PHYCR_CR_CAP_ADDR=0\n", __FUNCTION__); return (error); } return (0); } static int imx6_ahci_phy_write(struct ahci_controller* sc, uint32_t addr, uint16_t data) { int error; error = imx6_ahci_phy_addr(sc, addr); if (error != 0) { device_printf(sc->dev, "%s: error on imx6_ahci_phy_addr\n", __FUNCTION__); return (error); } ATA_OUTL(sc->r_mem, SATA_P0PHYCR, data); error = imx6_ahci_phy_ctrl(sc, SATA_P0PHYCR_CR_CAP_DATA, true); if (error != 0) { device_printf(sc->dev, "%s: error on SATA_P0PHYCR_CR_CAP_DATA=1\n", __FUNCTION__); return (error); } if (imx6_ahci_phy_ctrl(sc, SATA_P0PHYCR_CR_CAP_DATA, false) != 0) { device_printf(sc->dev, "%s: error on SATA_P0PHYCR_CR_CAP_DATA=0\n", __FUNCTION__); return (error); } if ((addr == SATA_PHY_CLOCK_RESET) && data) { /* we can't check ACK after RESET */ ATA_OUTL(sc->r_mem, SATA_P0PHYCR, SATA_P0PHYCR_CR_DATA_IN(data) | SATA_P0PHYCR_CR_WRITE); return (0); } error = imx6_ahci_phy_ctrl(sc, SATA_P0PHYCR_CR_WRITE, true); if (error != 0) { device_printf(sc->dev, "%s: error on SATA_P0PHYCR_CR_WRITE=1\n", __FUNCTION__); return (error); } error = imx6_ahci_phy_ctrl(sc, SATA_P0PHYCR_CR_WRITE, false); if (error != 0) { device_printf(sc->dev, "%s: error on SATA_P0PHYCR_CR_WRITE=0\n", __FUNCTION__); return (error); } return (0); } static int imx6_ahci_phy_read(struct ahci_controller* sc, uint32_t addr, uint16_t* val) { int error; uint32_t v; error = imx6_ahci_phy_addr(sc, addr); if (error != 0) { device_printf(sc->dev, "%s: error on imx6_ahci_phy_addr\n", __FUNCTION__); return (error); } error = imx6_ahci_phy_ctrl(sc, SATA_P0PHYCR_CR_READ, true); if (error != 0) { device_printf(sc->dev, "%s: error on SATA_P0PHYCR_CR_READ=1\n", __FUNCTION__); return (error); } v = ATA_INL(sc->r_mem, SATA_P0PHYSR); error = imx6_ahci_phy_ctrl(sc, 
SATA_P0PHYCR_CR_READ, false); if (error != 0) { device_printf(sc->dev, "%s: error on SATA_P0PHYCR_CR_READ=0\n", __FUNCTION__); return (error); } *val = SATA_P0PHYSR_CR_DATA_OUT(v); return (0); } static int imx6_ahci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) { return (ENXIO); } if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) { return (ENXIO); } device_set_desc(dev, "i.MX6 Integrated AHCI controller"); return (BUS_PROBE_DEFAULT); } static int imx6_ahci_attach(device_t dev) { struct ahci_controller* ctlr; uint16_t pllstat; uint32_t v; int error, timeout; ctlr = device_get_softc(dev); /* Power up the controller and phy. */ error = imx6_ccm_sata_enable(); if (error != 0) { device_printf(dev, "error enabling controller and phy\n"); return (error); } ctlr->vendorid = 0; ctlr->deviceid = 0; ctlr->subvendorid = 0; ctlr->subdeviceid = 0; ctlr->numirqs = 1; ctlr->r_rid = 0; if ((ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE)) == NULL) { return (ENXIO); } v = imx_iomux_gpr_get(IOMUX_GPR13); /* Clear out existing values; these numbers are bitmasks. */ v &= ~(IOMUX_GPR13_SATA_PHY_8(7) | IOMUX_GPR13_SATA_PHY_7(0x1f) | IOMUX_GPR13_SATA_PHY_6(7) | IOMUX_GPR13_SATA_SPEED(1) | IOMUX_GPR13_SATA_PHY_5(1) | IOMUX_GPR13_SATA_PHY_4(7) | IOMUX_GPR13_SATA_PHY_3(0xf) | IOMUX_GPR13_SATA_PHY_2(0x1f) | IOMUX_GPR13_SATA_PHY_1(1) | IOMUX_GPR13_SATA_PHY_0(1)); /* setting */ v |= IOMUX_GPR13_SATA_PHY_8(5) | /* Rx 3.0db */ IOMUX_GPR13_SATA_PHY_7(0x12) | /* Rx SATA2m */ IOMUX_GPR13_SATA_PHY_6(3) | /* Rx DPLL mode */ IOMUX_GPR13_SATA_SPEED(1) | /* 3.0GHz */ IOMUX_GPR13_SATA_PHY_5(0) | /* Spread Spectrum */ IOMUX_GPR13_SATA_PHY_4(4) | /* Tx Attenuation 9/16 */ IOMUX_GPR13_SATA_PHY_3(0) | /* Tx Boost 0db */ IOMUX_GPR13_SATA_PHY_2(0x11) | /* Tx Level 1.104V */ IOMUX_GPR13_SATA_PHY_1(1); /* PLL clock enable */ imx_iomux_gpr_set(IOMUX_GPR13, v); /* phy reset */ error = imx6_ahci_phy_write(ctlr, SATA_PHY_CLOCK_RESET, SATA_PHY_CLOCK_RESET_RST); if (error != 0) { device_printf(dev, "cannot reset PHY\n"); goto fail; } for (timeout = 50; timeout > 0; --timeout) { DELAY(100); error = imx6_ahci_phy_read(ctlr, SATA_PHY_LANE0_OUT_STAT, &pllstat); if (error != 0) { device_printf(dev, "cannot read LANE0 status\n"); goto fail; } if (pllstat & SATA_PHY_LANE0_OUT_STAT_RX_PLL_STATE) { break; } } if (timeout <= 0) { device_printf(dev, "timeout reading LANE0 status\n"); error = ETIMEDOUT; goto fail; } /* Support Staggered Spin-up */ v = ATA_INL(ctlr->r_mem, AHCI_CAP); ATA_OUTL(ctlr->r_mem, AHCI_CAP, v | AHCI_CAP_SSS); /* Ports Implemented.
must set 1 */ v = ATA_INL(ctlr->r_mem, AHCI_PI); ATA_OUTL(ctlr->r_mem, AHCI_PI, v | (1 << 0)); /* set 1ms-timer = AHB clock / 1000 */ ATA_OUTL(ctlr->r_mem, SATA_TIMER1MS, imx_ccm_ahb_hz() / 1000); /* * Note: ahci_attach will release ctlr->r_mem on errors automatically */ return (ahci_attach(dev)); fail: bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (error); } static int imx6_ahci_detach(device_t dev) { return (ahci_detach(dev)); } static device_method_t imx6_ahci_ata_methods[] = { /* device probe, attach and detach methods */ DEVMETHOD(device_probe, imx6_ahci_probe), DEVMETHOD(device_attach, imx6_ahci_attach), DEVMETHOD(device_detach, imx6_ahci_detach), /* ahci bus methods */ DEVMETHOD(bus_print_child, ahci_print_child), DEVMETHOD(bus_alloc_resource, ahci_alloc_resource), DEVMETHOD(bus_release_resource, ahci_release_resource), DEVMETHOD(bus_setup_intr, ahci_setup_intr), DEVMETHOD(bus_teardown_intr, ahci_teardown_intr), - DEVMETHOD(bus_child_location_str, ahci_child_location_str), + DEVMETHOD(bus_child_location, ahci_child_location), DEVMETHOD_END }; static driver_t ahci_ata_driver = { "ahci", imx6_ahci_ata_methods, sizeof(struct ahci_controller) }; DRIVER_MODULE(imx6_ahci, simplebus, ahci_ata_driver, ahci_devclass, 0, 0); MODULE_DEPEND(imx6_ahci, ahci, 1, 1, 1); SIMPLEBUS_PNP_INFO(compat_data) diff --git a/sys/arm/nvidia/tegra_ahci.c b/sys/arm/nvidia/tegra_ahci.c index 725fc999a3a3..80416bf3582a 100644 --- a/sys/arm/nvidia/tegra_ahci.c +++ b/sys/arm/nvidia/tegra_ahci.c @@ -1,785 +1,785 @@ /*- * Copyright (c) 2016 Michal Meloun * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * AHCI driver for Tegra SoCs. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SATA_CONFIGURATION 0x180 #define SATA_CONFIGURATION_CLK_OVERRIDE (1U << 31) #define SATA_CONFIGURATION_EN_FPCI (1 << 0) #define SATA_FPCI_BAR5 0x94 #define SATA_FPCI_BAR_START(x) (((x) & 0xFFFFFFF) << 4) #define SATA_FPCI_BAR_ACCESS_TYPE (1 << 0) #define SATA_INTR_MASK 0x188 #define SATA_INTR_MASK_IP_INT_MASK (1 << 16) #define SCFG_OFFSET 0x1000 #define T_SATA0_CFG_1 0x04 #define T_SATA0_CFG_1_IO_SPACE (1 << 0) #define T_SATA0_CFG_1_MEMORY_SPACE (1 << 1) #define T_SATA0_CFG_1_BUS_MASTER (1 << 2) #define T_SATA0_CFG_1_SERR (1 << 8) #define T_SATA0_CFG_9 0x24 #define T_SATA0_CFG_9_BASE_ADDRESS_SHIFT 13 #define T_SATA0_CFG_35 0x94 #define T_SATA0_CFG_35_IDP_INDEX_MASK (0x7ff << 2) #define T_SATA0_CFG_35_IDP_INDEX (0x2a << 2) #define T_SATA0_AHCI_IDP1 0x98 #define T_SATA0_AHCI_IDP1_DATA 0x400040 #define T_SATA0_CFG_PHY_1 0x12c #define T_SATA0_CFG_PHY_1_PADS_IDDQ_EN (1 << 23) #define T_SATA0_CFG_PHY_1_PAD_PLL_IDDQ_EN (1 << 22) #define T_SATA0_NVOOB 0x114 #define T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH_MASK (0x3 << 26) #define T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH (0x3 << 26) #define T_SATA0_NVOOB_SQUELCH_FILTER_MODE_MASK (0x3 << 24) #define T_SATA0_NVOOB_SQUELCH_FILTER_MODE (0x1 << 24) #define T_SATA0_NVOOB_COMMA_CNT_MASK (0xff << 16) #define T_SATA0_NVOOB_COMMA_CNT (0x07 << 16) #define T_SATA0_CFG_PHY 0x120 #define T_SATA0_CFG_PHY_MASK_SQUELCH (1 << 24) #define T_SATA0_CFG_PHY_USE_7BIT_ALIGN_DET_FOR_SPD (1 << 11) #define T_SATA0_CFG2NVOOB_2 0x134 #define T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW_MASK (0x1ff << 18) #define T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW (0xc << 18) #define T_SATA0_AHCI_HBA_CAP_BKDR 0x300 #define T_SATA0_AHCI_HBA_CAP_BKDR_SNCQ (1 << 30) #define T_SATA0_AHCI_HBA_CAP_BKDR_SUPP_PM (1 << 17) #define T_SATA0_AHCI_HBA_CAP_BKDR_SALP (1 << 26) #define T_SATA0_AHCI_HBA_CAP_BKDR_SLUMBER_ST_CAP (1 << 14) #define T_SATA0_AHCI_HBA_CAP_BKDR_PARTIAL_ST_CAP (1 << 13) #define T_SATA0_BKDOOR_CC 0x4a4 #define T_SATA0_BKDOOR_CC_CLASS_CODE_MASK (0xffff << 16) #define T_SATA0_BKDOOR_CC_CLASS_CODE (0x0106 << 16) #define T_SATA0_BKDOOR_CC_PROG_IF_MASK (0xff << 8) #define T_SATA0_BKDOOR_CC_PROG_IF (0x01 << 8) #define T_SATA0_CFG_SATA 0x54c #define T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN (1 << 12) #define T_SATA0_CFG_MISC 0x550 #define T_SATA0_INDEX 0x680 #define T_SATA0_CHX_PHY_CTRL1_GEN1 0x690 #define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_MASK 0xff #define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT 8 #define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_MASK 0xff #define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT 0 #define T_SATA0_CHX_PHY_CTRL1_GEN2 0x694 #define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_MASK 0xff #define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_SHIFT 12 #define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_MASK 0xff #define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_SHIFT 0 #define T_SATA0_CHX_PHY_CTRL2 0x69c #define T_SATA0_CHX_PHY_CTRL2_CDR_CNTL_GEN1 0x23 #define T_SATA0_CHX_PHY_CTRL11 0x6d0 #define T_SATA0_CHX_PHY_CTRL11_GEN2_RX_EQ (0x2800 << 16) #define T_SATA0_CHX_PHY_CTRL17 0x6e8 #define T_SATA0_CHX_PHY_CTRL18 0x6ec #define T_SATA0_CHX_PHY_CTRL20 0x6f4 #define T_SATA0_CHX_PHY_CTRL21 0x6f8 #define FUSE_SATA_CALIB 0x124 #define FUSE_SATA_CALIB_MASK 0x3 #define SATA_AUX_MISC_CNTL 0x1108 #define SATA_AUX_PAD_PLL_CTRL_0 0x1120 #define SATA_AUX_PAD_PLL_CTRL_1 0x1124 #define SATA_AUX_PAD_PLL_CTRL_2 0x1128 #define 
SATA_AUX_PAD_PLL_CTRL_3 0x112c #define T_AHCI_HBA_CCC_PORTS 0x0018 #define T_AHCI_HBA_CAP_BKDR 0x00A0 #define T_AHCI_HBA_CAP_BKDR_S64A (1 << 31) #define T_AHCI_HBA_CAP_BKDR_SNCQ (1 << 30) #define T_AHCI_HBA_CAP_BKDR_SSNTF (1 << 29) #define T_AHCI_HBA_CAP_BKDR_SMPS (1 << 28) #define T_AHCI_HBA_CAP_BKDR_SUPP_STG_SPUP (1 << 27) #define T_AHCI_HBA_CAP_BKDR_SALP (1 << 26) #define T_AHCI_HBA_CAP_BKDR_SAL (1 << 25) #define T_AHCI_HBA_CAP_BKDR_SUPP_CLO (1 << 24) #define T_AHCI_HBA_CAP_BKDR_INTF_SPD_SUPP(x) (((x) & 0xF) << 20) #define T_AHCI_HBA_CAP_BKDR_SUPP_NONZERO_OFFSET (1 << 19) #define T_AHCI_HBA_CAP_BKDR_SUPP_AHCI_ONLY (1 << 18) #define T_AHCI_HBA_CAP_BKDR_SUPP_PM (1 << 17) #define T_AHCI_HBA_CAP_BKDR_FIS_SWITCHING (1 << 16) #define T_AHCI_HBA_CAP_BKDR_PIO_MULT_DRQ_BLK (1 << 15) #define T_AHCI_HBA_CAP_BKDR_SLUMBER_ST_CAP (1 << 14) #define T_AHCI_HBA_CAP_BKDR_PARTIAL_ST_CAP (1 << 13) #define T_AHCI_HBA_CAP_BKDR_NUM_CMD_SLOTS(x) (((x) & 0x1F) << 8) #define T_AHCI_HBA_CAP_BKDR_CMD_CMPL_COALESING (1 << 7) #define T_AHCI_HBA_CAP_BKDR_ENCL_MGMT_SUPP (1 << 6) #define T_AHCI_HBA_CAP_BKDR_EXT_SATA (1 << 5) #define T_AHCI_HBA_CAP_BKDR_NUM_PORTS(x) (((x) & 0xF) << 0) #define T_AHCI_PORT_BKDR 0x0170 #define T_AHCI_PORT_BKDR_PXDEVSLP_DETO_OVERRIDE_VAL(x) (((x) & 0xFF) << 24) #define T_AHCI_PORT_BKDR_PXDEVSLP_MDAT_OVERRIDE_VAL(x) (((x) & 0x1F) << 16) #define T_AHCI_PORT_BKDR_PXDEVSLP_DETO_OVERRIDE (1 << 15) #define T_AHCI_PORT_BKDR_PXDEVSLP_MDAT_OVERRIDE (1 << 14) #define T_AHCI_PORT_BKDR_PXDEVSLP_DM(x) (((x) & 0xF) << 10) #define T_AHCI_PORT_BKDR_PORT_UNCONNECTED (1 << 9) #define T_AHCI_PORT_BKDR_CLK_CLAMP_CTRL_CLAMP_THIS_CH (1 << 8) #define T_AHCI_PORT_BKDR_CLK_CLAMP_CTRL_TXRXCLK_UNCLAMP (1 << 7) #define T_AHCI_PORT_BKDR_CLK_CLAMP_CTRL_TXRXCLK_CLAMP (1 << 6) #define T_AHCI_PORT_BKDR_CLK_CLAMP_CTRL_DEVCLK_UNCLAMP (1 << 5) #define T_AHCI_PORT_BKDR_CLK_CLAMP_CTRL_DEVCLK_CLAMP (1 << 4) #define T_AHCI_PORT_BKDR_HOTPLUG_CAP (1 << 3) #define T_AHCI_PORT_BKDR_MECH_SWITCH (1 << 2) #define T_AHCI_PORT_BKDR_COLD_PRSN_DET (1 << 1) #define T_AHCI_PORT_BKDR_EXT_SATA_SUPP (1 << 0) /* AUX registers */ #define SATA_AUX_MISC_CNTL_1 0x008 #define SATA_AUX_MISC_CNTL_1_DEVSLP_OVERRIDE (1 << 17) #define SATA_AUX_MISC_CNTL_1_SDS_SUPPORT (1 << 13) #define SATA_AUX_MISC_CNTL_1_DESO_SUPPORT (1 << 15) #define AHCI_WR4(_sc, _r, _v) bus_write_4((_sc)->ctlr.r_mem, (_r), (_v)) #define AHCI_RD4(_sc, _r) bus_read_4((_sc)->ctlr.r_mem, (_r)) #define SATA_WR4(_sc, _r, _v) bus_write_4((_sc)->sata_mem, (_r), (_v)) #define SATA_RD4(_sc, _r) bus_read_4((_sc)->sata_mem, (_r)) struct sata_pad_calibration { uint32_t gen1_tx_amp; uint32_t gen1_tx_peak; uint32_t gen2_tx_amp; uint32_t gen2_tx_peak; }; static const struct sata_pad_calibration tegra124_pad_calibration[] = { {0x18, 0x04, 0x18, 0x0a}, {0x0e, 0x04, 0x14, 0x0a}, {0x0e, 0x07, 0x1a, 0x0e}, {0x14, 0x0e, 0x1a, 0x0e}, }; struct ahci_soc; struct tegra_ahci_sc { struct ahci_controller ctlr; /* Must be first */ device_t dev; struct ahci_soc *soc; struct resource *sata_mem; struct resource *aux_mem; clk_t clk_sata; clk_t clk_sata_oob; clk_t clk_pll_e; clk_t clk_cml; hwreset_t hwreset_sata; hwreset_t hwreset_sata_oob; hwreset_t hwreset_sata_cold; regulator_t regulators[16]; /* Safe maximum */ phy_t phy; }; struct ahci_soc { char **regulator_names; int (*init)(struct tegra_ahci_sc *sc); }; /* Tegra 124 config. 
*/ static char *tegra124_reg_names[] = { "hvdd-supply", "vddio-supply", "avdd-supply", "target-5v-supply", "target-12v-supply", NULL }; static int tegra124_ahci_init(struct tegra_ahci_sc *sc); static struct ahci_soc tegra124_soc = { .regulator_names = tegra124_reg_names, .init = tegra124_ahci_init, }; /* Tegra 210 config. */ static char *tegra210_reg_names[] = { NULL }; static struct ahci_soc tegra210_soc = { .regulator_names = tegra210_reg_names, }; static struct ofw_compat_data compat_data[] = { {"nvidia,tegra124-ahci", (uintptr_t)&tegra124_soc}, {"nvidia,tegra210-ahci", (uintptr_t)&tegra210_soc}, {NULL, 0} }; static int get_fdt_resources(struct tegra_ahci_sc *sc, phandle_t node) { int i, rv; /* Regulators. */ for (i = 0; sc->soc->regulator_names[i] != NULL; i++) { if (i >= nitems(sc->regulators)) { device_printf(sc->dev, "Too many regulators present in DT.\n"); return (EOVERFLOW); } rv = regulator_get_by_ofw_property(sc->dev, 0, sc->soc->regulator_names[i], sc->regulators + i); if (rv != 0) { device_printf(sc->dev, "Cannot get '%s' regulator\n", sc->soc->regulator_names[i]); return (ENXIO); } } /* Resets. */ rv = hwreset_get_by_ofw_name(sc->dev, 0, "sata", &sc->hwreset_sata ); if (rv != 0) { device_printf(sc->dev, "Cannot get 'sata' reset\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "sata-oob", &sc->hwreset_sata_oob); if (rv != 0) { device_printf(sc->dev, "Cannot get 'sata oob' reset\n"); return (ENXIO); } rv = hwreset_get_by_ofw_name(sc->dev, 0, "sata-cold", &sc->hwreset_sata_cold); if (rv != 0) { device_printf(sc->dev, "Cannot get 'sata cold' reset\n"); return (ENXIO); } /* Phy */ rv = phy_get_by_ofw_name(sc->dev, 0, "sata-0", &sc->phy); if (rv != 0) { rv = phy_get_by_ofw_idx(sc->dev, 0, 0, &sc->phy); if (rv != 0) { device_printf(sc->dev, "Cannot get 'sata' phy\n"); return (ENXIO); } } /* Clocks. */ rv = clk_get_by_ofw_name(sc->dev, 0, "sata", &sc->clk_sata); if (rv != 0) { device_printf(sc->dev, "Cannot get 'sata' clock\n"); return (ENXIO); } rv = clk_get_by_ofw_name(sc->dev, 0, "sata-oob", &sc->clk_sata_oob); if (rv != 0) { device_printf(sc->dev, "Cannot get 'sata oob' clock\n"); return (ENXIO); } /* These are optional */ rv = clk_get_by_ofw_name(sc->dev, 0, "cml1", &sc->clk_cml); if (rv != 0) sc->clk_cml = NULL; rv = clk_get_by_ofw_name(sc->dev, 0, "pll_e", &sc->clk_pll_e); if (rv != 0) sc->clk_pll_e = NULL; return (0); } static int enable_fdt_resources(struct tegra_ahci_sc *sc) { int i, rv; /* Enable regulators. 
*/ for (i = 0; i < nitems(sc->regulators); i++) { if (sc->regulators[i] == NULL) continue; rv = regulator_enable(sc->regulators[i]); if (rv != 0) { device_printf(sc->dev, "Cannot enable '%s' regulator\n", sc->soc->regulator_names[i]); return (rv); } } /* Stop clocks */ clk_stop(sc->clk_sata); clk_stop(sc->clk_sata_oob); tegra_powergate_power_off(TEGRA_POWERGATE_SAX); rv = hwreset_assert(sc->hwreset_sata); if (rv != 0) { device_printf(sc->dev, "Cannot assert 'sata' reset\n"); return (rv); } rv = hwreset_assert(sc->hwreset_sata_oob); if (rv != 0) { device_printf(sc->dev, "Cannot assert 'sata oob' reset\n"); return (rv); } rv = hwreset_assert(sc->hwreset_sata_cold); if (rv != 0) { device_printf(sc->dev, "Cannot assert 'sata cold' reset\n"); return (rv); } rv = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_SAX, sc->clk_sata, sc->hwreset_sata); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'SAX' powergate\n"); return (rv); } rv = clk_enable(sc->clk_sata_oob); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'sata oob' clock\n"); return (rv); } if (sc->clk_cml != NULL) { rv = clk_enable(sc->clk_cml); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'cml' clock\n"); return (rv); } } if (sc->clk_pll_e != NULL) { rv = clk_enable(sc->clk_pll_e); if (rv != 0) { device_printf(sc->dev, "Cannot enable 'pll e' clock\n"); return (rv); } } rv = hwreset_deassert(sc->hwreset_sata_cold); if (rv != 0) { device_printf(sc->dev, "Cannot unreset 'sata cold' reset\n"); return (rv); } rv = hwreset_deassert(sc->hwreset_sata_oob); if (rv != 0) { device_printf(sc->dev, "Cannot unreset 'sata oob' reset\n"); return (rv); } rv = phy_enable(sc->phy); if (rv != 0) { device_printf(sc->dev, "Cannot enable SATA phy\n"); return (rv); } return (0); } static int tegra124_ahci_init(struct tegra_ahci_sc *sc) { uint32_t val; const struct sata_pad_calibration *calib; /* Pad calibration. */ val = tegra_fuse_read_4(FUSE_SATA_CALIB); calib = tegra124_pad_calibration + (val & FUSE_SATA_CALIB_MASK); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_INDEX, 1); val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN1); val &= ~(T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_MASK << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT); val &= ~(T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_MASK << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT); val |= calib->gen1_tx_amp << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT; val |= calib->gen1_tx_peak << T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN1, val); val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN2); val &= ~(T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_MASK << T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_SHIFT); val &= ~(T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_MASK << T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_SHIFT); val |= calib->gen2_tx_amp << T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_SHIFT; val |= calib->gen2_tx_peak << T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_SHIFT; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN2, val); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL11, T_SATA0_CHX_PHY_CTRL11_GEN2_RX_EQ); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL2, T_SATA0_CHX_PHY_CTRL2_CDR_CNTL_GEN1); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_INDEX, 0); return (0); } static int tegra_ahci_ctrl_init(struct tegra_ahci_sc *sc) { uint32_t val; int rv; /* Enable SATA MMIO. 
*/ val = SATA_RD4(sc, SATA_FPCI_BAR5); val &= ~SATA_FPCI_BAR_START(~0); val |= SATA_FPCI_BAR_START(0x10000); val |= SATA_FPCI_BAR_ACCESS_TYPE; SATA_WR4(sc, SATA_FPCI_BAR5, val); /* Enable FPCI access */ val = SATA_RD4(sc, SATA_CONFIGURATION); val |= SATA_CONFIGURATION_EN_FPCI; SATA_WR4(sc, SATA_CONFIGURATION, val); /* Recommended electrical settings for phy */ SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL17, 0x55010000); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL18, 0x55010000); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL20, 0x1); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL21, 0x1); /* SQUELCH and Gen3 */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG_PHY); val |= T_SATA0_CFG_PHY_MASK_SQUELCH; val &= ~T_SATA0_CFG_PHY_USE_7BIT_ALIGN_DET_FOR_SPD; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_PHY, val); val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_NVOOB); val &= ~T_SATA0_NVOOB_COMMA_CNT_MASK; val &= ~T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH_MASK; val &= ~T_SATA0_NVOOB_SQUELCH_FILTER_MODE_MASK; val |= T_SATA0_NVOOB_COMMA_CNT; val |= T_SATA0_NVOOB_SQUELCH_FILTER_LENGTH; val |= T_SATA0_NVOOB_SQUELCH_FILTER_MODE; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_NVOOB, val); /* Setup COMWAKE_IDLE_CNT */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG2NVOOB_2); val &= ~T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW_MASK; val |= T_SATA0_CFG2NVOOB_2_COMWAKE_IDLE_CNT_LOW; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG2NVOOB_2, val); if (sc->soc->init != NULL) { rv = sc->soc->init(sc); if (rv != 0) { device_printf(sc->dev, "SOC-specific initialization failed: %d\n", rv); return (rv); } } /* Enable backdoor programming. */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG_SATA); val |= T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_SATA, val); /* Set device class and interface */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_BKDOOR_CC); val &= ~T_SATA0_BKDOOR_CC_CLASS_CODE_MASK; val &= ~T_SATA0_BKDOOR_CC_PROG_IF_MASK; val |= T_SATA0_BKDOOR_CC_CLASS_CODE; val |= T_SATA0_BKDOOR_CC_PROG_IF; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_BKDOOR_CC, val); /* Enable LPM capabilities */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_AHCI_HBA_CAP_BKDR); val |= T_SATA0_AHCI_HBA_CAP_BKDR_PARTIAL_ST_CAP; val |= T_SATA0_AHCI_HBA_CAP_BKDR_SLUMBER_ST_CAP; val |= T_SATA0_AHCI_HBA_CAP_BKDR_SALP; val |= T_SATA0_AHCI_HBA_CAP_BKDR_SUPP_PM; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_AHCI_HBA_CAP_BKDR, val); /* Disable backdoor programming. */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG_SATA); val &= ~T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_SATA, val); /* SATA Second Level Clock Gating */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG_35); val &= ~T_SATA0_CFG_35_IDP_INDEX_MASK; val |= T_SATA0_CFG_35_IDP_INDEX; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_35, val); SATA_WR4(sc, SCFG_OFFSET + T_SATA0_AHCI_IDP1, 0x400040); val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG_PHY_1); val |= T_SATA0_CFG_PHY_1_PADS_IDDQ_EN; val |= T_SATA0_CFG_PHY_1_PAD_PLL_IDDQ_EN; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_PHY_1, val); /* * Indicate SATA only has the capability to enter DevSleep * from slumber link.
*/ if (sc->aux_mem != NULL) { val = bus_read_4(sc->aux_mem, SATA_AUX_MISC_CNTL_1); val |= SATA_AUX_MISC_CNTL_1_DESO_SUPPORT; bus_write_4(sc->aux_mem, SATA_AUX_MISC_CNTL_1, val); } /* Enable IPFS Clock Gating */ val = SATA_RD4(sc, SCFG_OFFSET + SATA_CONFIGURATION); val &= ~SATA_CONFIGURATION_CLK_OVERRIDE; SATA_WR4(sc, SCFG_OFFSET + SATA_CONFIGURATION, val); /* Enable IO & memory access, bus master mode */ val = SATA_RD4(sc, SCFG_OFFSET + T_SATA0_CFG_1); val |= T_SATA0_CFG_1_IO_SPACE; val |= T_SATA0_CFG_1_MEMORY_SPACE; val |= T_SATA0_CFG_1_BUS_MASTER; val |= T_SATA0_CFG_1_SERR; SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_1, val); /* AHCI bar */ SATA_WR4(sc, SCFG_OFFSET + T_SATA0_CFG_9, 0x08000 << T_SATA0_CFG_9_BASE_ADDRESS_SHIFT); /* Unmask interrupts. */ val = SATA_RD4(sc, SATA_INTR_MASK); val |= SATA_INTR_MASK_IP_INT_MASK; SATA_WR4(sc, SATA_INTR_MASK, val); return (0); } static int tegra_ahci_ctlr_reset(device_t dev) { struct tegra_ahci_sc *sc; int rv; uint32_t reg; sc = device_get_softc(dev); rv = ahci_ctlr_reset(dev); if (rv != 0) return (0); AHCI_WR4(sc, T_AHCI_HBA_CCC_PORTS, 1); /* Overwrite AHCI capabilities. */ reg = AHCI_RD4(sc, T_AHCI_HBA_CAP_BKDR); reg &= ~T_AHCI_HBA_CAP_BKDR_NUM_PORTS(~0); reg |= T_AHCI_HBA_CAP_BKDR_NUM_PORTS(0); reg |= T_AHCI_HBA_CAP_BKDR_EXT_SATA; reg |= T_AHCI_HBA_CAP_BKDR_ENCL_MGMT_SUPP; reg |= T_AHCI_HBA_CAP_BKDR_CMD_CMPL_COALESING; reg |= T_AHCI_HBA_CAP_BKDR_FIS_SWITCHING; reg |= T_AHCI_HBA_CAP_BKDR_SUPP_PM; reg |= T_AHCI_HBA_CAP_BKDR_SUPP_CLO; reg |= T_AHCI_HBA_CAP_BKDR_SUPP_STG_SPUP; AHCI_WR4(sc, T_AHCI_HBA_CAP_BKDR, reg); /* Overwrite AHCI port capabilities. */ reg = AHCI_RD4(sc, T_AHCI_PORT_BKDR); reg |= T_AHCI_PORT_BKDR_COLD_PRSN_DET; reg |= T_AHCI_PORT_BKDR_HOTPLUG_CAP; reg |= T_AHCI_PORT_BKDR_EXT_SATA_SUPP; AHCI_WR4(sc, T_AHCI_PORT_BKDR, reg); return (0); } static int tegra_ahci_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc_copy(dev, "AHCI SATA controller"); return (BUS_PROBE_DEFAULT); } static int tegra_ahci_attach(device_t dev) { struct tegra_ahci_sc *sc; struct ahci_controller *ctlr; phandle_t node; int rv, rid; sc = device_get_softc(dev); sc->dev = dev; ctlr = &sc->ctlr; node = ofw_bus_get_node(dev); sc->soc = (struct ahci_soc *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; ctlr->r_rid = 0; ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE); if (ctlr->r_mem == NULL) return (ENXIO); rid = 1; sc->sata_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sata_mem == NULL) { rv = ENXIO; goto fail; } /* Aux is optional */ rid = 2; sc->aux_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); rv = get_fdt_resources(sc, node); if (rv != 0) { device_printf(sc->dev, "Failed to allocate FDT resource(s)\n"); goto fail; } rv = enable_fdt_resources(sc); if (rv != 0) { device_printf(sc->dev, "Failed to enable FDT resource(s)\n"); goto fail; } rv = tegra_ahci_ctrl_init(sc); if (rv != 0) { device_printf(sc->dev, "Failed to initialize controller\n"); goto fail; } /* Setup controller defaults. */ ctlr->msi = 0; ctlr->numirqs = 1; ctlr->ccc = 0; /* Reset controller.
*/ rv = tegra_ahci_ctlr_reset(dev); if (rv != 0) goto fail; rv = ahci_attach(dev); return (rv); fail: /* XXX FDT stuff */ if (sc->sata_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 1, sc->sata_mem); if (ctlr->r_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (rv); } static int tegra_ahci_detach(device_t dev) { ahci_detach(dev); return (0); } static int tegra_ahci_suspend(device_t dev) { struct tegra_ahci_sc *sc = device_get_softc(dev); bus_generic_suspend(dev); /* Disable interrupts, so the state change(s) don't trigger. */ ATA_OUTL(sc->ctlr.r_mem, AHCI_GHC, ATA_INL(sc->ctlr.r_mem, AHCI_GHC) & (~AHCI_GHC_IE)); return (0); } static int tegra_ahci_resume(device_t dev) { int res; if ((res = tegra_ahci_ctlr_reset(dev)) != 0) return (res); ahci_ctlr_setup(dev); return (bus_generic_resume(dev)); } static device_method_t tegra_ahci_methods[] = { DEVMETHOD(device_probe, tegra_ahci_probe), DEVMETHOD(device_attach, tegra_ahci_attach), DEVMETHOD(device_detach, tegra_ahci_detach), DEVMETHOD(device_suspend, tegra_ahci_suspend), DEVMETHOD(device_resume, tegra_ahci_resume), DEVMETHOD(bus_print_child, ahci_print_child), DEVMETHOD(bus_alloc_resource, ahci_alloc_resource), DEVMETHOD(bus_release_resource, ahci_release_resource), DEVMETHOD(bus_setup_intr, ahci_setup_intr), DEVMETHOD(bus_teardown_intr, ahci_teardown_intr), - DEVMETHOD(bus_child_location_str, ahci_child_location_str), + DEVMETHOD(bus_child_location, ahci_child_location), DEVMETHOD(bus_get_dma_tag, ahci_get_dma_tag), DEVMETHOD_END }; static DEFINE_CLASS_0(ahci, tegra_ahci_driver, tegra_ahci_methods, sizeof(struct tegra_ahci_sc)); DRIVER_MODULE(tegra_ahci, simplebus, tegra_ahci_driver, ahci_devclass, NULL, NULL); diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c index 18623a6ab240..02358d407c66 100644 --- a/sys/dev/acpica/acpi.c +++ b/sys/dev/acpica/acpi.c @@ -1,4375 +1,4369 @@ /*- * Copyright (c) 2000 Takanori Watanabe * Copyright (c) 2000 Mitsuru IWASAKI * Copyright (c) 2000, 2001 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
*/ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices"); /* Hooks for the ACPI CA debugging infrastructure */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("ACPI") static d_open_t acpiopen; static d_close_t acpiclose; static d_ioctl_t acpiioctl; static struct cdevsw acpi_cdevsw = { .d_version = D_VERSION, .d_open = acpiopen, .d_close = acpiclose, .d_ioctl = acpiioctl, .d_name = "acpi", }; struct acpi_interface { ACPI_STRING *data; int num; }; static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL }; static char *pcilink_ids[] = { "PNP0C0F", NULL }; /* Global mutex for locking access to the ACPI subsystem. */ struct mtx acpi_mutex; struct callout acpi_sleep_timer; /* Bitmap of device quirks. */ int acpi_quirks; /* Supported sleep states. */ static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT]; static void acpi_lookup(void *arg, const char *name, device_t *dev); static int acpi_modevent(struct module *mod, int event, void *junk); static int acpi_probe(device_t dev); static int acpi_attach(device_t dev); static int acpi_suspend(device_t dev); static int acpi_resume(device_t dev); static int acpi_shutdown(device_t dev); static device_t acpi_add_child(device_t bus, u_int order, const char *name, int unit); static int acpi_print_child(device_t bus, device_t child); static void acpi_probe_nomatch(device_t bus, device_t child); static void acpi_driver_added(device_t dev, driver_t *driver); static void acpi_child_deleted(device_t dev, device_t child); static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); static int acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value); static struct resource_list *acpi_get_rlist(device_t dev, device_t child); static void acpi_reserve_resources(device_t dev); static int acpi_sysres_alloc(device_t dev); static int acpi_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count); static struct resource *acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int acpi_adjust_resource(device_t bus, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end); static int acpi_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r); static void acpi_delete_resource(device_t bus, device_t child, int type, int rid); static uint32_t acpi_isa_get_logicalid(device_t dev); static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count); static int acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match); static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret); static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *context, void **retval); static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev, int max_depth, acpi_scan_cb_t user_fn, void *arg); static int acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids); static void acpi_platform_osc(device_t dev); static void 
acpi_probe_children(device_t bus); static void acpi_probe_order(ACPI_HANDLE handle, int *order); static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static void acpi_sleep_enable(void *arg); static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc); static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state); static void acpi_shutdown_final(void *arg, int howto); static void acpi_enable_fixed_events(struct acpi_softc *sc); static void acpi_resync_clock(struct acpi_softc *sc); static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate); static int acpi_wake_prep_walk(int sstate); static int acpi_wake_sysctl_walk(device_t dev); static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS); static void acpi_system_eventhandler_sleep(void *arg, int state); static void acpi_system_eventhandler_wakeup(void *arg, int state); static int acpi_sname2sstate(const char *sname); static const char *acpi_sstate2sname(int sstate); static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS); static int acpi_pm_func(u_long cmd, void *arg, ...); -static int acpi_child_location_str_method(device_t acdev, device_t child, - char *buf, size_t buflen); -static int acpi_child_pnpinfo_str_method(device_t acdev, device_t child, - char *buf, size_t buflen); +static int acpi_child_location_method(device_t acdev, device_t child, + struct sbuf *sb); +static int acpi_child_pnpinfo_method(device_t acdev, device_t child, + struct sbuf *sb); static void acpi_enable_pcie(void); static void acpi_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp); static void acpi_reset_interfaces(device_t dev); static device_method_t acpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_probe), DEVMETHOD(device_attach, acpi_attach), DEVMETHOD(device_shutdown, acpi_shutdown), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_suspend, acpi_suspend), DEVMETHOD(device_resume, acpi_resume), /* Bus interface */ DEVMETHOD(bus_add_child, acpi_add_child), DEVMETHOD(bus_print_child, acpi_print_child), DEVMETHOD(bus_probe_nomatch, acpi_probe_nomatch), DEVMETHOD(bus_driver_added, acpi_driver_added), DEVMETHOD(bus_child_deleted, acpi_child_deleted), DEVMETHOD(bus_read_ivar, acpi_read_ivar), DEVMETHOD(bus_write_ivar, acpi_write_ivar), DEVMETHOD(bus_get_resource_list, acpi_get_rlist), DEVMETHOD(bus_set_resource, acpi_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_alloc_resource, acpi_alloc_resource), DEVMETHOD(bus_adjust_resource, acpi_adjust_resource), DEVMETHOD(bus_release_resource, acpi_release_resource), DEVMETHOD(bus_delete_resource, acpi_delete_resource), - DEVMETHOD(bus_child_pnpinfo_str, acpi_child_pnpinfo_str_method), - DEVMETHOD(bus_child_location_str, acpi_child_location_str_method), + DEVMETHOD(bus_child_pnpinfo, acpi_child_pnpinfo_method), + DEVMETHOD(bus_child_location, acpi_child_location_method), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit), DEVMETHOD(bus_get_cpus, acpi_get_cpus), DEVMETHOD(bus_get_domain, acpi_get_domain), /* ACPI bus */ 
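/*
 * Editorial note: the sbuf-based bus_child_pnpinfo and bus_child_location
 * methods wired above replace the older bus_child_pnpinfo_str and
 * bus_child_location_str methods.  An implementation appends
 * space-separated key=value pairs to the caller's sbuf instead of filling
 * a fixed char buffer, so it no longer has to track a buflen.  A minimal
 * sketch, assuming a hypothetical mybus(4) driver with a per-child slot
 * helper:
 *
 *	static int
 *	mybus_child_location(device_t dev, device_t child, struct sbuf *sb)
 *	{
 *		sbuf_printf(sb, "slot=%d", mybus_get_slot(child));
 *		return (0);
 *	}
 *
 * For acpi(4) itself the resulting strings look like
 * "handle=\_SB_.PCI0 _PXM=0" (location) and
 * "_HID=PNP0303 _UID=0 _CID=none" (pnpinfo); the handle path above is an
 * illustrative example, not taken from a particular machine.
 */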
DEVMETHOD(acpi_id_probe, acpi_device_id_probe), DEVMETHOD(acpi_evaluate_object, acpi_device_eval_obj), DEVMETHOD(acpi_pwr_for_sleep, acpi_device_pwr_for_sleep), DEVMETHOD(acpi_scan_children, acpi_device_scan_children), /* ISA emulation */ DEVMETHOD(isa_pnp_probe, acpi_isa_pnp_probe), DEVMETHOD_END }; static driver_t acpi_driver = { "acpi", acpi_methods, sizeof(struct acpi_softc), }; static devclass_t acpi_devclass; EARLY_DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(acpi, 1); ACPI_SERIAL_DECL(acpi, "ACPI root bus"); /* Local pools for managing system resources for ACPI child devices. */ static struct rman acpi_rman_io, acpi_rman_mem; #define ACPI_MINIMUM_AWAKETIME 5 /* Holds the description of the acpi0 device. */ static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2]; SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ACPI debugging"); static char acpi_ca_version[12]; SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD, acpi_ca_version, 0, "Version of Intel ACPI-CA"); /* * Allow overriding _OSI methods. */ static char acpi_install_interface[256]; TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface, sizeof(acpi_install_interface)); static char acpi_remove_interface[256]; TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface, sizeof(acpi_remove_interface)); /* Allow users to dump Debug objects without ACPI debugger. */ static int acpi_debug_objects; TUNABLE_INT("debug.acpi.enable_debug_objects", &acpi_debug_objects); SYSCTL_PROC(_debug_acpi, OID_AUTO, enable_debug_objects, CTLFLAG_RW | CTLTYPE_INT | CTLFLAG_NEEDGIANT, NULL, 0, acpi_debug_objects_sysctl, "I", "Enable Debug objects"); /* Allow the interpreter to ignore common mistakes in BIOS. */ static int acpi_interpreter_slack = 1; TUNABLE_INT("debug.acpi.interpreter_slack", &acpi_interpreter_slack); SYSCTL_INT(_debug_acpi, OID_AUTO, interpreter_slack, CTLFLAG_RDTUN, &acpi_interpreter_slack, 1, "Turn on interpreter slack mode."); /* Ignore register widths set by FADT and use default widths instead. */ static int acpi_ignore_reg_width = 1; TUNABLE_INT("debug.acpi.default_register_width", &acpi_ignore_reg_width); SYSCTL_INT(_debug_acpi, OID_AUTO, default_register_width, CTLFLAG_RDTUN, &acpi_ignore_reg_width, 1, "Ignore register widths set by FADT"); /* Allow users to override quirks. */ TUNABLE_INT("debug.acpi.quirks", &acpi_quirks); int acpi_susp_bounce; SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW, &acpi_susp_bounce, 0, "Don't actually suspend, just test devices."); /* * ACPI can only be loaded as a module by the loader; activating it after * system bootstrap time is not useful, and can be fatal to the system. * It also cannot be unloaded, since the entire system bus hierarchy hangs * off it. */ static int acpi_modevent(struct module *mod, int event, void *junk) { switch (event) { case MOD_LOAD: if (!cold) { printf("The ACPI driver cannot be loaded after boot.\n"); return (EPERM); } break; case MOD_UNLOAD: if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI) return (EBUSY); break; default: break; } return (0); } /* * Perform early initialization. */ ACPI_STATUS acpi_Startup(void) { static int started = 0; ACPI_STATUS status; int val; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Only run the startup code once. The MADT driver also calls this. */ if (started) return_VALUE (AE_OK); started = 1; /* * Initialize the ACPICA subsystem. 
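* (Editorial note: the full bring-up order across this file is
* AcpiInitializeSubsystem() and AcpiInitializeTables() here in
* acpi_Startup(), then AcpiLoadTables(), AcpiEnableSubsystem() and
* AcpiInitializeObjects() in acpi_attach().)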
*/ if (ACPI_FAILURE(status = AcpiInitializeSubsystem())) { printf("ACPI: Could not initialize Subsystem: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing * if more tables exist. */ if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) { printf("ACPI: Table initialisation failed: %s\n", AcpiFormatException(status)); return_VALUE (status); } /* Set up any quirks we have for this system. */ if (acpi_quirks == ACPI_Q_OK) acpi_table_quirks(&acpi_quirks); /* If the user manually set the disabled hint to 0, force-enable ACPI. */ if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0) acpi_quirks &= ~ACPI_Q_BROKEN; if (acpi_quirks & ACPI_Q_BROKEN) { printf("ACPI disabled by blacklist. Contact your BIOS vendor.\n"); status = AE_SUPPORT; } return_VALUE (status); } /* * Detect ACPI and perform early initialisation. */ int acpi_identify(void) { ACPI_TABLE_RSDP *rsdp; ACPI_TABLE_HEADER *rsdt; ACPI_PHYSICAL_ADDRESS paddr; struct sbuf sb; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (!cold) return (ENXIO); /* Check that we haven't been disabled with a hint. */ if (resource_disabled("acpi", 0)) return (ENXIO); /* Check for other PM systems. */ if (power_pm_get_type() != POWER_PM_TYPE_NONE && power_pm_get_type() != POWER_PM_TYPE_ACPI) { printf("ACPI identify failed, other PM system enabled.\n"); return (ENXIO); } /* Initialize root tables. */ if (ACPI_FAILURE(acpi_Startup())) { printf("ACPI: Try disabling either ACPI or apic support.\n"); return (ENXIO); } if ((paddr = AcpiOsGetRootPointer()) == 0 || (rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL) return (ENXIO); if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0) paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress; else paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress; AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP)); if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL) return (ENXIO); sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN); sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE); sbuf_trim(&sb); sbuf_putc(&sb, ' '); sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE); sbuf_trim(&sb); sbuf_finish(&sb); sbuf_delete(&sb); AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER)); snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION); return (0); } /* * Fetch some descriptive data from ACPI to put in our attach message. */ static int acpi_probe(device_t dev) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); device_set_desc(dev, acpi_desc); return_VALUE (BUS_PROBE_NOWILDCARD); } static int acpi_attach(device_t dev) { struct acpi_softc *sc; ACPI_STATUS status; int error, state; UINT32 flags; UINT8 TypeA, TypeB; char *env; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); sc = device_get_softc(dev); sc->acpi_dev = dev; callout_init(&sc->susp_force_to, 1); error = ENXIO; /* Initialize resource manager. */ acpi_rman_io.rm_type = RMAN_ARRAY; acpi_rman_io.rm_start = 0; acpi_rman_io.rm_end = 0xffff; acpi_rman_io.rm_descr = "ACPI I/O ports"; if (rman_init(&acpi_rman_io) != 0) panic("acpi rman_init IO ports failed"); acpi_rman_mem.rm_type = RMAN_ARRAY; acpi_rman_mem.rm_descr = "ACPI I/O memory addresses"; if (rman_init(&acpi_rman_mem) != 0) panic("acpi rman_init memory failed"); /* Initialise the ACPI mutex */ mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF); /* * Set the globals from our tunables. 
This is needed because ACPI-CA * uses UINT8 for some values and we have no tunable_byte. */ AcpiGbl_EnableInterpreterSlack = acpi_interpreter_slack ? TRUE : FALSE; AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? TRUE : FALSE; AcpiGbl_UseDefaultRegisterWidths = acpi_ignore_reg_width ? TRUE : FALSE; #ifndef ACPI_DEBUG /* * Disable all debugging layers and levels. */ AcpiDbgLayer = 0; AcpiDbgLevel = 0; #endif /* Override OS interfaces if the user requested. */ acpi_reset_interfaces(dev); /* Load ACPI name space. */ status = AcpiLoadTables(); if (ACPI_FAILURE(status)) { device_printf(dev, "Could not load Namespace: %s\n", AcpiFormatException(status)); goto out; } /* Handle MCFG table if present. */ acpi_enable_pcie(); /* * Note that some systems (specifically, those with namespace evaluation * issues that require the avoidance of parts of the namespace) must * avoid running _INI and _STA on everything, as well as dodging the final * object init pass. * * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT). * * XXX We should arrange for the object init pass after we have attached * all our child devices, but on many systems it works here. */ flags = 0; if (testenv("debug.acpi.avoid")) flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT; /* Bring the hardware and basic handlers online. */ if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) { device_printf(dev, "Could not enable ACPI: %s\n", AcpiFormatException(status)); goto out; } /* * Call the ECDT probe function to provide EC functionality before * the namespace has been evaluated. * * XXX This happens before the sysresource devices have been probed and * attached so its resources come from nexus0. In practice, this isn't * a problem but should be addressed eventually. */ acpi_ec_ecdt_probe(dev); /* Bring device objects and regions online. */ if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) { device_printf(dev, "Could not initialize ACPI objects: %s\n", AcpiFormatException(status)); goto out; } /* * Setup our sysctl tree. * * XXX: This doesn't check to make sure that none of these fail. */ sysctl_ctx_init(&sc->acpi_sysctl_ctx); sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_name(dev), CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 0, 0, acpi_supported_sleep_state_sysctl, "A", "List supported ACPI sleep states."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", "Power button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", "Sleep button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", "Lid ACPI sleep state. 
Set to S3 if you want to suspend your laptop when you close the lid."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", ""); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0, "sleep delay in seconds"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "disable_on_reboot", CTLFLAG_RW, &sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system"); SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "handle_reboot", CTLFLAG_RW, &sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot"); /* * Default to 1 second before sleeping to give some machines time to * stabilize. */ sc->acpi_sleep_delay = 1; if (bootverbose) sc->acpi_verbose = 1; if ((env = kern_getenv("hw.acpi.verbose")) != NULL) { if (strcmp(env, "0") != 0) sc->acpi_verbose = 1; freeenv(env); } /* Only enable reboot by default if the FADT says it is available. */ if (AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER) sc->acpi_handle_reboot = 1; #if !ACPI_REDUCED_HARDWARE /* Only enable S4BIOS by default if the FACS says it is available. */ if (AcpiGbl_FACS != NULL && AcpiGbl_FACS->Flags & ACPI_FACS_S4_BIOS_PRESENT) sc->acpi_s4bios = 1; #endif /* Probe all supported sleep states. */ acpi_sleep_states[ACPI_STATE_S0] = TRUE; for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT, __DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) && ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) acpi_sleep_states[state] = TRUE; /* * Dispatch the default sleep state to devices. The lid switch is set * to UNKNOWN by default to avoid surprising users. */ sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ? ACPI_STATE_S5 : ACPI_STATE_UNKNOWN; sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN; sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ? ACPI_STATE_S1 : ACPI_STATE_UNKNOWN; sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ? ACPI_STATE_S3 : ACPI_STATE_UNKNOWN; /* Pick the first valid sleep state for the sleep button default. */ sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN; for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++) if (acpi_sleep_states[state]) { sc->acpi_sleep_button_sx = state; break; } acpi_enable_fixed_events(sc); /* * Scan the namespace and attach/initialise children. */ /* Register our shutdown handler. */ EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc, SHUTDOWN_PRI_LAST); /* * Register our acpi event handlers. * XXX should be configurable e.g. via userland policy manager. */ EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep, sc, ACPI_EVENT_PRI_LAST); EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup, sc, ACPI_EVENT_PRI_LAST); /* Flag our initial states.
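* (acpi_sleep_disabled is set below and stays set until the
* acpi_sleep_timer callout armed near the end of acpi_attach() runs
* acpi_sleep_enable() after ACPI_MINIMUM_AWAKETIME seconds.)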
*/ sc->acpi_enabled = TRUE; sc->acpi_sstate = ACPI_STATE_S0; sc->acpi_sleep_disabled = TRUE; /* Create the control device */ sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0664, "acpi"); sc->acpi_dev_t->si_drv1 = sc; if ((error = acpi_machdep_init(dev))) goto out; /* Register ACPI again to pass the correct argument of pm_func. */ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc); acpi_platform_osc(dev); if (!acpi_disabled("bus")) { EVENTHANDLER_REGISTER(dev_lookup, acpi_lookup, NULL, 1000); acpi_probe_children(dev); } /* Update all GPEs and enable runtime GPEs. */ status = AcpiUpdateAllGpes(); if (ACPI_FAILURE(status)) device_printf(dev, "Could not update all GPEs: %s\n", AcpiFormatException(status)); /* Allow sleep request after a while. */ callout_init_mtx(&acpi_sleep_timer, &acpi_mutex, 0); callout_reset(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME, acpi_sleep_enable, sc); error = 0; out: return_VALUE (error); } static void acpi_set_power_children(device_t dev, int state) { device_t child; device_t *devlist; int dstate, i, numdevs; if (device_get_children(dev, &devlist, &numdevs) != 0) return; /* * Retrieve and set D-state for the sleep state if _SxD is present. * Skip children who aren't attached since they are handled separately. */ for (i = 0; i < numdevs; i++) { child = devlist[i]; dstate = state; if (device_is_attached(child) && acpi_device_pwr_for_sleep(dev, child, &dstate) == 0) acpi_set_powerstate(child, dstate); } free(devlist, M_TEMP); } static int acpi_suspend(device_t dev) { int error; GIANT_REQUIRED; error = bus_generic_suspend(dev); if (error == 0) acpi_set_power_children(dev, ACPI_STATE_D3); return (error); } static int acpi_resume(device_t dev) { GIANT_REQUIRED; acpi_set_power_children(dev, ACPI_STATE_D0); return (bus_generic_resume(dev)); } static int acpi_shutdown(device_t dev) { GIANT_REQUIRED; /* Allow children to shutdown first. */ bus_generic_shutdown(dev); /* * Enable any GPEs that are able to power-on the system (i.e., RTC). * Also, disable any that are not valid for this state (most). */ acpi_wake_prep_walk(ACPI_STATE_S5); return (0); } /* * Handle a new device being added */ static device_t acpi_add_child(device_t bus, u_int order, const char *name, int unit) { struct acpi_device *ad; device_t child; if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL) return (NULL); resource_list_init(&ad->ad_rl); child = device_add_child_ordered(bus, order, name, unit); if (child != NULL) device_set_ivars(child, ad); else free(ad, M_ACPIDEV); return (child); } static int acpi_print_child(device_t bus, device_t child) { struct acpi_device *adev = device_get_ivars(child); struct resource_list *rl = &adev->ad_rl; int retval = 0; retval += bus_print_child_header(bus, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%jd"); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += bus_print_child_domain(bus, child); retval += bus_print_child_footer(bus, child); return (retval); } /* * If this device is an ACPI child but no one claimed it, attempt * to power it off. We'll power it back up when a driver is added. * * XXX Disabled for now since many necessary devices (like fdc and * ATA) don't claim the devices we created for them but still expect * them to be powered up. 
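* (When this power-down behaviour is wanted, it is compiled in via the
* ACPI_ENABLE_POWERDOWN_NODRIVER option used in the functions below.)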
*/ static void acpi_probe_nomatch(device_t bus, device_t child) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D3); #endif } /* * If a new driver has a chance to probe a child, first power it up. * * XXX Disabled for now (see acpi_probe_nomatch for details). */ static void acpi_driver_added(device_t dev, driver_t *driver) { device_t child, *devlist; int i, numdevs; DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs)) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) == DS_NOTPRESENT) { #ifdef ACPI_ENABLE_POWERDOWN_NODRIVER acpi_set_powerstate(child, ACPI_STATE_D0); if (device_probe_and_attach(child) != 0) acpi_set_powerstate(child, ACPI_STATE_D3); #else device_probe_and_attach(child); #endif } } free(devlist, M_TEMP); } /* Location hint for devctl(8) */ static int -acpi_child_location_str_method(device_t cbdev, device_t child, char *buf, - size_t buflen) +acpi_child_location_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); - char buf2[32]; int pxm; if (dinfo->ad_handle) { - snprintf(buf, buflen, "handle=%s", acpi_name(dinfo->ad_handle)); + sbuf_printf(sb, "handle=%s", acpi_name(dinfo->ad_handle)); if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ad_handle, "_PXM", &pxm))) { - snprintf(buf2, 32, " _PXM=%d", pxm); - strlcat(buf, buf2, buflen); - } - } else { - snprintf(buf, buflen, ""); + sbuf_printf(sb, " _PXM=%d", pxm); + } } return (0); } /* PnP information for devctl(8) */ int -acpi_pnpinfo_str(ACPI_HANDLE handle, char *buf, size_t buflen) +acpi_pnpinfo(ACPI_HANDLE handle, struct sbuf *sb) { ACPI_DEVICE_INFO *adinfo; if (ACPI_FAILURE(AcpiGetObjectInfo(handle, &adinfo))) { - snprintf(buf, buflen, "unknown"); + sbuf_printf(sb, "unknown"); return (0); } - snprintf(buf, buflen, "_HID=%s _UID=%lu _CID=%s", + sbuf_printf(sb, "_HID=%s _UID=%lu _CID=%s", (adinfo->Valid & ACPI_VALID_HID) ? adinfo->HardwareId.String : "none", (adinfo->Valid & ACPI_VALID_UID) ? strtoul(adinfo->UniqueId.String, NULL, 10) : 0UL, ((adinfo->Valid & ACPI_VALID_CID) && adinfo->CompatibleIdList.Count > 0) ? adinfo->CompatibleIdList.Ids[0].String : "none"); AcpiOsFree(adinfo); return (0); } static int -acpi_child_pnpinfo_str_method(device_t cbdev, device_t child, char *buf, - size_t buflen) +acpi_child_pnpinfo_method(device_t cbdev, device_t child, struct sbuf *sb) { struct acpi_device *dinfo = device_get_ivars(child); - return (acpi_pnpinfo_str(dinfo->ad_handle, buf, buflen)); + return (acpi_pnpinfo(dinfo->ad_handle, sb)); } /* * Handle device deletion. 
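* (acpi_child_deleted() undoes the AcpiAttachData() association made in
* acpi_probe_child(), so acpi_get_device() cannot return a stale device_t
* for the handle.)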
*/ static void acpi_child_deleted(device_t dev, device_t child) { struct acpi_device *dinfo = device_get_ivars(child); if (acpi_get_device(dinfo->ad_handle) == child) AcpiDetachData(dinfo->ad_handle, acpi_fake_objhandler); } /* * Handle per-device ivars */ static int acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } /* ACPI and ISA compatibility ivars */ switch(index) { case ACPI_IVAR_HANDLE: *(ACPI_HANDLE *)result = ad->ad_handle; break; case ACPI_IVAR_PRIVATE: *(void **)result = ad->ad_private; break; case ACPI_IVAR_FLAGS: *(int *)result = ad->ad_flags; break; case ISA_IVAR_VENDORID: case ISA_IVAR_SERIAL: case ISA_IVAR_COMPATID: *(int *)result = -1; break; case ISA_IVAR_LOGICALID: *(int *)result = acpi_isa_get_logicalid(child); break; case PCI_IVAR_CLASS: *(uint8_t*)result = (ad->ad_cls_class >> 16) & 0xff; break; case PCI_IVAR_SUBCLASS: *(uint8_t*)result = (ad->ad_cls_class >> 8) & 0xff; break; case PCI_IVAR_PROGIF: *(uint8_t*)result = (ad->ad_cls_class >> 0) & 0xff; break; default: return (ENOENT); } return (0); } static int acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct acpi_device *ad; if ((ad = device_get_ivars(child)) == NULL) { device_printf(child, "device has no ivars\n"); return (ENOENT); } switch(index) { case ACPI_IVAR_HANDLE: ad->ad_handle = (ACPI_HANDLE)value; break; case ACPI_IVAR_PRIVATE: ad->ad_private = (void *)value; break; case ACPI_IVAR_FLAGS: ad->ad_flags = (int)value; break; default: panic("bad ivar write request (%d)", index); return (ENOENT); } return (0); } /* * Handle child resource allocation/removal */ static struct resource_list * acpi_get_rlist(device_t dev, device_t child) { struct acpi_device *ad; ad = device_get_ivars(child); return (&ad->ad_rl); } static int acpi_match_resource_hint(device_t dev, int type, long value) { struct acpi_device *ad = device_get_ivars(dev); struct resource_list *rl = &ad->ad_rl; struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->start <= value && rle->end >= value) return (1); } return (0); } /* * Wire device unit numbers based on resource matches in hints. */ static void acpi_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp) { const char *s; long value; int line, matches, unit; /* * Iterate over all the hints for the devices with the specified * name to see if one's resources are a subset of this device. */ line = 0; while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) { /* Must have an "at" for acpi or isa. */ resource_string_value(name, unit, "at", &s); if (!(strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 || strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0)) continue; /* * Check for matching resources. We must have at least one match. * Since I/O and memory resources cannot be shared, if we get a * match on either of those, ignore any mismatches in IRQs or DRQs. * * XXX: We may want to revisit this to be more lenient and wire * as long as it gets one match. */ matches = 0; if (resource_long_value(name, unit, "port", &value) == 0) { /* * Floppy drive controllers are notorious for having a * wide variety of resources not all of which include the * first port that is specified by the hint (typically * 0x3f0) (see the comment above fdc_isa_alloc_resources() * in fdc_isa.c). However, they do all seem to include * port + 2 (e.g. 
0x3f2) so for a floppy device, look for * 'value + 2' in the port resources instead of the hint * value. */ if (strcmp(name, "fdc") == 0) value += 2; if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value)) matches++; else continue; } if (resource_long_value(name, unit, "maddr", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value)) matches++; else continue; } if (matches > 0) goto matched; if (resource_long_value(name, unit, "irq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_IRQ, value)) matches++; else continue; } if (resource_long_value(name, unit, "drq", &value) == 0) { if (acpi_match_resource_hint(child, SYS_RES_DRQ, value)) matches++; else continue; } matched: if (matches > 0) { /* We have a winner! */ *unitp = unit; break; } } } /* * Fetch the NUMA domain for a device by mapping the value returned by * _PXM to a NUMA domain. If the device does not have a _PXM method, * -2 is returned. If any other error occurs, -1 is returned. */ static int acpi_parse_pxm(device_t dev) { #ifdef NUMA #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) ACPI_HANDLE handle; ACPI_STATUS status; int pxm; handle = acpi_get_handle(dev); if (handle == NULL) return (-2); status = acpi_GetInteger(handle, "_PXM", &pxm); if (ACPI_SUCCESS(status)) return (acpi_map_pxm_to_vm_domainid(pxm)); if (status == AE_NOT_FOUND) return (-2); #endif #endif return (-1); } int acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { int d, error; d = acpi_parse_pxm(child); if (d < 0) return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); switch (op) { case LOCAL_CPUS: if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = cpuset_domain[d]; return (0); case INTR_CPUS: error = bus_generic_get_cpus(dev, child, op, setsize, cpuset); if (error != 0) return (error); if (setsize != sizeof(cpuset_t)) return (EINVAL); CPU_AND(cpuset, &cpuset_domain[d]); return (0); default: return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); } } /* * Fetch the NUMA domain for the given device 'dev'. * * If a device has a _PXM method, map that to a NUMA domain. * Otherwise, pass the request up to the parent. * If there's no matching domain or the domain cannot be * determined, return ENOENT. */ int acpi_get_domain(device_t dev, device_t child, int *domain) { int d; d = acpi_parse_pxm(child); if (d >= 0) { *domain = d; return (0); } if (d == -1) return (ENOENT); /* No _PXM node; go up a level */ return (bus_generic_get_domain(dev, child, domain)); } /* * Pre-allocate/manage all memory and IO resources. Since rman can't handle * duplicates, we merge any in the sysresource attach routine. */ static int acpi_sysres_alloc(device_t dev) { struct resource *res; struct resource_list *rl; struct resource_list_entry *rle; struct rman *rm; device_t *children; int child_count, i; /* * Probe/attach any sysresource devices. This would be unnecessary if we * had multi-pass probe/attach. */ if (device_get_children(dev, &children, &child_count) != 0) return (ENXIO); for (i = 0; i < child_count; i++) { if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) device_probe_and_attach(children[i]); } free(children, M_TEMP); rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev); STAILQ_FOREACH(rle, rl, link) { if (rle->res != NULL) { device_printf(dev, "duplicate resource for %jx\n", rle->start); continue; } /* Only memory and IO resources are valid here. 
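* (IRQ and DRQ entries on the sysresource devices are skipped by the
* switch below; only port and memory ranges seed the acpi_rman_io and
* acpi_rman_mem pools.)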
*/ switch (rle->type) { case SYS_RES_IOPORT: rm = &acpi_rman_io; break; case SYS_RES_MEMORY: rm = &acpi_rman_mem; break; default: continue; } /* Pre-allocate resource and add to our rman pool. */ res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, rle->type, &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, 0); if (res != NULL) { rman_manage_region(rm, rman_get_start(res), rman_get_end(res)); rle->res = res; } else if (bootverbose) device_printf(dev, "reservation of %jx, %jx (%d) failed\n", rle->start, rle->count, rle->type); } return (0); } /* * Reserve declared resources for devices found during attach once system * resources have been allocated. */ static void acpi_reserve_resources(device_t dev) { struct resource_list_entry *rle; struct resource_list *rl; struct acpi_device *ad; struct acpi_softc *sc; device_t *children; int child_count, i; sc = device_get_softc(dev); if (device_get_children(dev, &children, &child_count) != 0) return; for (i = 0; i < child_count; i++) { ad = device_get_ivars(children[i]); rl = &ad->ad_rl; /* Don't reserve system resources. */ if (ACPI_ID_PROBE(dev, children[i], sysres_ids, NULL) <= 0) continue; STAILQ_FOREACH(rle, rl, link) { /* * Don't reserve IRQ resources. There are many sticky things * to get right otherwise (e.g. IRQs for psm, atkbd, and HPET * when using legacy routing). */ if (rle->type == SYS_RES_IRQ) continue; /* * Don't reserve the resource if it is already allocated. * The acpi_ec(4) driver can allocate its resources early * if ECDT is present. */ if (rle->res != NULL) continue; /* * Try to reserve the resource from our parent. If this * fails because the resource is a system resource, just * let it be. The resource range is already reserved so * that other devices will not use it. If the driver * needs to allocate the resource, then * acpi_alloc_resource() will sub-alloc from the system * resource. */ resource_list_reserve(rl, dev, children[i], rle->type, &rle->rid, rle->start, rle->end, rle->count, 0); } } free(children, M_TEMP); sc->acpi_resources_reserved = 1; } static int acpi_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct acpi_softc *sc = device_get_softc(dev); struct acpi_device *ad = device_get_ivars(child); struct resource_list *rl = &ad->ad_rl; ACPI_DEVICE_INFO *devinfo; rman_res_t end; int allow; /* Ignore IRQ resources for PCI link devices. */ if (type == SYS_RES_IRQ && ACPI_ID_PROBE(dev, child, pcilink_ids, NULL) <= 0) return (0); /* * Ignore most resources for PCI root bridges. Some BIOSes * incorrectly enumerate the memory ranges they decode as plain * memory resources instead of as ResourceProducer ranges. Other * BIOSes incorrectly list system resource entries for I/O ranges * under the PCI bridge. Do allow the one known-correct case on * x86 of a PCI bridge claiming the I/O ports used for PCI config * access. */ if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { if (ACPI_SUCCESS(AcpiGetObjectInfo(ad->ad_handle, &devinfo))) { if ((devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0) { #if defined(__i386__) || defined(__amd64__) allow = (type == SYS_RES_IOPORT && start == CONF1_ADDR_PORT); #else allow = 0; #endif if (!allow) { AcpiOsFree(devinfo); return (0); } } AcpiOsFree(devinfo); } } #ifdef INTRNG /* map with default for now */ if (type == SYS_RES_IRQ) start = (rman_res_t)acpi_map_intr(child, (u_int)start, acpi_get_handle(child)); #endif /* If the resource is already allocated, fail. 
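* (This method backs bus_set_resource(9); a child-driver sketch, with
* hypothetical resource values:
*
*	bus_set_resource(dev, SYS_RES_IOPORT, 0, 0x60, 1);
*	res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
*
* where rid is an int initialized to 0.)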
*/ if (resource_list_busy(rl, type, rid)) return (EBUSY); /* If the resource is already reserved, release it. */ if (resource_list_reserved(rl, type, rid)) resource_list_unreserve(rl, dev, child, type, rid); /* Add the resource. */ end = (start + count - 1); resource_list_add(rl, type, rid, start, end, count); /* Don't reserve resources until the system resources are allocated. */ if (!sc->acpi_resources_reserved) return (0); /* Don't reserve system resources. */ if (ACPI_ID_PROBE(dev, child, sysres_ids, NULL) <= 0) return (0); /* * Don't reserve IRQ resources. There are many sticky things to * get right otherwise (e.g. IRQs for psm, atkbd, and HPET when * using legacy routing). */ if (type == SYS_RES_IRQ) return (0); /* * Don't reserve resources for CPU devices. Some of these * resources need to be allocated as shareable, but reservations * are always non-shareable. */ if (device_get_devclass(child) == devclass_find("cpu")) return (0); /* * Reserve the resource. * * XXX: Ignores failure for now. Failure here is probably a * BIOS/firmware bug? */ resource_list_reserve(rl, dev, child, type, &rid, start, end, count, 0); return (0); } static struct resource * acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifndef INTRNG ACPI_RESOURCE ares; #endif struct acpi_device *ad; struct resource_list_entry *rle; struct resource_list *rl; struct resource *res; int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); /* * First attempt at allocating the resource. For direct children, * use resource_list_alloc() to handle reserved resources. For * other devices, pass the request up to our parent. */ if (bus == device_get_parent(child)) { ad = device_get_ivars(child); rl = &ad->ad_rl; /* * Simulate the behavior of the ISA bus for direct children * devices. That is, if a non-default range is specified for * a resource that doesn't exist, use bus_set_resource() to * add the resource before allocating it. Note that these * resources will not be reserved. */ if (!isdefault && resource_list_find(rl, type, *rid) == NULL) resource_list_add(rl, type, *rid, start, end, count); res = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); #ifndef INTRNG if (res != NULL && type == SYS_RES_IRQ) { /* * Since bus_config_intr() takes immediate effect, we cannot * configure the interrupt associated with a device when we * parse the resources but have to defer it until a driver * actually allocates the interrupt via bus_alloc_resource(). * * XXX: Should we handle the lookup failing? */ if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares))) acpi_config_intr(child, &ares); } #endif /* * If this is an allocation of the "default" range for a given * RID, fetch the exact bounds for this resource from the * resource list entry to try to allocate the range from the * system resource regions. */ if (res == NULL && isdefault) { rle = resource_list_find(rl, type, *rid); if (rle != NULL) { start = rle->start; end = rle->end; count = rle->count; } } } else res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); /* * If the first attempt failed and this is an allocation of a * specific range, try to satisfy the request via a suballocation * from our system resource regions. 
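* (Wildcard requests never reach the suballocation path: the
* start + count - 1 == end test below only passes for fully specified
* ranges, and acpi_alloc_sysres() asserts the same condition.)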
*/ if (res == NULL && start + count - 1 == end) res = acpi_alloc_sysres(child, type, rid, start, end, count, flags); return (res); } /* * Attempt to allocate a specific resource range from the system * resource ranges. Note that we only handle memory and I/O port * system resources. */ struct resource * acpi_alloc_sysres(device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct rman *rm; struct resource *res; switch (type) { case SYS_RES_IOPORT: rm = &acpi_rman_io; break; case SYS_RES_MEMORY: rm = &acpi_rman_mem; break; default: return (NULL); } KASSERT(start + count - 1 == end, ("wildcard resource range")); res = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (res == NULL) return (NULL); rman_set_rid(res, *rid); /* If requested, activate the resource using the parent's method. */ if (flags & RF_ACTIVE) if (bus_activate_resource(child, type, *rid, res) != 0) { rman_release_resource(res); return (NULL); } return (res); } static int acpi_is_resource_managed(int type, struct resource *r) { /* We only handle memory and IO resources through rman. */ switch (type) { case SYS_RES_IOPORT: return (rman_is_region_manager(r, &acpi_rman_io)); case SYS_RES_MEMORY: return (rman_is_region_manager(r, &acpi_rman_mem)); } return (0); } static int acpi_adjust_resource(device_t bus, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { if (acpi_is_resource_managed(type, r)) return (rman_adjust_resource(r, start, end)); return (bus_generic_adjust_resource(bus, child, type, r, start, end)); } static int acpi_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int ret; /* * If this resource belongs to one of our internal managers, * deactivate it and release it to the local pool. */ if (acpi_is_resource_managed(type, r)) { if (rman_get_flags(r) & RF_ACTIVE) { ret = bus_deactivate_resource(child, type, rid, r); if (ret != 0) return (ret); } return (rman_release_resource(r)); } return (bus_generic_rl_release_resource(bus, child, type, rid, r)); } static void acpi_delete_resource(device_t bus, device_t child, int type, int rid) { struct resource_list *rl; rl = acpi_get_rlist(bus, child); if (resource_list_busy(rl, type, rid)) { device_printf(bus, "delete_resource: Resource still owned by child" " (type=%d, rid=%d)\n", type, rid); return; } resource_list_unreserve(rl, bus, child, type, rid); resource_list_delete(rl, type, rid); } /* Allocate an IO port or memory resource, given its GAS. */ int acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas, struct resource **res, u_int flags) { int error, res_type; error = ENOMEM; if (type == NULL || rid == NULL || gas == NULL || res == NULL) return (EINVAL); /* We only support memory and IO spaces. */ switch (gas->SpaceId) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: res_type = SYS_RES_MEMORY; break; case ACPI_ADR_SPACE_SYSTEM_IO: res_type = SYS_RES_IOPORT; break; default: return (EOPNOTSUPP); } /* * If the register width is less than 8, assume the BIOS author means * it is a bit field and just allocate a byte. */ if (gas->BitWidth && gas->BitWidth < 8) gas->BitWidth = 8; /* Validate the address after we're sure we support the space. 
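* (Illustrative caller sketch, assuming a driver that has an
* ACPI_GENERIC_ADDRESS at hand, e.g. the FADT reset register:
*
*	int type, rid = 0;
*	struct resource *res;
*
*	if (acpi_bus_alloc_gas(dev, &type, &rid,
*	    &AcpiGbl_FADT.ResetRegister, &res, 0) == 0)
*		... res is active and usable with bus_read/bus_write ...
*
* The variable names here are for illustration only.)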
*/ if (gas->Address == 0 || gas->BitWidth == 0) return (EINVAL); bus_set_resource(dev, res_type, *rid, gas->Address, gas->BitWidth / 8); *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags); if (*res != NULL) { *type = res_type; error = 0; } else bus_delete_resource(dev, res_type, *rid); return (error); } /* Probe _HID and _CID for compatible ISA PNP ids. */ static uint32_t acpi_isa_get_logicalid(device_t dev) { ACPI_DEVICE_INFO *devinfo; ACPI_HANDLE h; uint32_t pnpid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* Fetch and validate the HID. */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); pnpid = (devinfo->Valid & ACPI_VALID_HID) != 0 && devinfo->HardwareId.Length >= ACPI_EISAID_STRING_SIZE ? PNP_EISAID(devinfo->HardwareId.String) : 0; AcpiOsFree(devinfo); return_VALUE (pnpid); } static int acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count) { ACPI_DEVICE_INFO *devinfo; ACPI_PNP_DEVICE_ID *ids; ACPI_HANDLE h; uint32_t *pnpid; int i, valid; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); pnpid = cids; /* Fetch and validate the CID */ if ((h = acpi_get_handle(dev)) == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return_VALUE (0); if ((devinfo->Valid & ACPI_VALID_CID) == 0) { AcpiOsFree(devinfo); return_VALUE (0); } if (devinfo->CompatibleIdList.Count < count) count = devinfo->CompatibleIdList.Count; ids = devinfo->CompatibleIdList.Ids; for (i = 0, valid = 0; i < count; i++) if (ids[i].Length >= ACPI_EISAID_STRING_SIZE && strncmp(ids[i].String, "PNP", 3) == 0) { *pnpid++ = PNP_EISAID(ids[i].String); valid++; } AcpiOsFree(devinfo); return_VALUE (valid); } static int acpi_device_id_probe(device_t bus, device_t dev, char **ids, char **match) { ACPI_HANDLE h; ACPI_OBJECT_TYPE t; int rv; int i; h = acpi_get_handle(dev); if (ids == NULL || h == NULL) return (ENXIO); t = acpi_get_type(dev); if (t != ACPI_TYPE_DEVICE && t != ACPI_TYPE_PROCESSOR) return (ENXIO); /* Try to match one of the array of IDs with a HID or CID. */ for (i = 0; ids[i] != NULL; i++) { rv = acpi_MatchHid(h, ids[i]); if (rv == ACPI_MATCHHID_NOMATCH) continue; if (match != NULL) { *match = ids[i]; } return ((rv == ACPI_MATCHHID_HID)? BUS_PROBE_DEFAULT : BUS_PROBE_LOW_PRIORITY); } return (ENXIO); } static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret) { ACPI_HANDLE h; if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); return (AcpiEvaluateObject(h, pathname, parameters, ret)); } int acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate) { struct acpi_softc *sc; ACPI_HANDLE handle; ACPI_STATUS status; char sxd[8]; handle = acpi_get_handle(dev); /* * XXX If we find these devices, don't try to power them down. * The serial and IRDA ports on my T23 hang the system when * set to D3 and it appears that such legacy devices may * need special handling in their drivers. */ if (dstate == NULL || handle == NULL || acpi_MatchHid(handle, "PNP0500") || acpi_MatchHid(handle, "PNP0501") || acpi_MatchHid(handle, "PNP0502") || acpi_MatchHid(handle, "PNP0510") || acpi_MatchHid(handle, "PNP0511")) return (ENXIO); /* * Override next state with the value from _SxD, if present. * Note illegal _S0D is evaluated because some systems expect this. 
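* (Concretely: when entering S3 this builds "_S3D", and a successful
* acpi_GetInteger() below replaces the D-state handed in via *dstate.)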
*/ sc = device_get_softc(bus); snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate); status = acpi_GetInteger(handle, sxd, dstate); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { device_printf(dev, "failed to get %s on %s: %s\n", sxd, acpi_name(handle), AcpiFormatException(status)); return (ENXIO); } return (0); } /* Callback arg for our implementation of walking the namespace. */ struct acpi_device_scan_ctx { acpi_scan_cb_t user_fn; void *arg; ACPI_HANDLE parent; }; static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval) { struct acpi_device_scan_ctx *ctx; device_t dev, old_dev; ACPI_STATUS status; ACPI_OBJECT_TYPE type; /* * Skip this device if we think we'll have trouble with it or it is * the parent where the scan began. */ ctx = (struct acpi_device_scan_ctx *)arg; if (acpi_avoid(h) || h == ctx->parent) return (AE_OK); /* If this is not a valid device type (e.g., a method), skip it. */ if (ACPI_FAILURE(AcpiGetType(h, &type))) return (AE_OK); if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR && type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER) return (AE_OK); /* * Call the user function with the current device. If it is unchanged * afterwards, return. Otherwise, we update the handle to the new dev. */ old_dev = acpi_get_device(h); dev = old_dev; status = ctx->user_fn(h, &dev, level, ctx->arg); if (ACPI_FAILURE(status) || old_dev == dev) return (status); /* Remove the old child and its connection to the handle. */ if (old_dev != NULL) device_delete_child(device_get_parent(old_dev), old_dev); /* Recreate the handle association if the user created a device. */ if (dev != NULL) AcpiAttachData(h, acpi_fake_objhandler, dev); return (AE_OK); } static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev, int max_depth, acpi_scan_cb_t user_fn, void *arg) { ACPI_HANDLE h; struct acpi_device_scan_ctx ctx; if (acpi_disabled("children")) return (AE_OK); if (dev == NULL) h = ACPI_ROOT_OBJECT; else if ((h = acpi_get_handle(dev)) == NULL) return (AE_BAD_PARAMETER); ctx.user_fn = user_fn; ctx.arg = arg; ctx.parent = h; return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth, acpi_device_scan_cb, NULL, &ctx, NULL)); } /* * Even though ACPI devices are not PCI, we use the PCI approach for setting * device power states since it's close enough to ACPI. */ int acpi_set_powerstate(device_t child, int state) { ACPI_HANDLE h; ACPI_STATUS status; h = acpi_get_handle(child); if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX) return (EINVAL); if (h == NULL) return (0); /* Ignore errors if the power methods aren't present. */ status = acpi_pwr_switch_consumer(h, state); if (ACPI_SUCCESS(status)) { if (bootverbose) device_printf(child, "set ACPI power state D%d on %s\n", state, acpi_name(h)); } else if (status != AE_NOT_FOUND) device_printf(child, "failed to set ACPI power state D%d on %s: %s\n", state, acpi_name(h), AcpiFormatException(status)); return (0); } static int acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids) { int result, cid_count, i; uint32_t lid, cids[8]; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * ISA-style drivers attached to ACPI may persist and * probe manually if we return ENOENT. We never want * that to happen, so don't ever return it. 
*/ result = ENXIO; /* Scan the supplied IDs for a match */ lid = acpi_isa_get_logicalid(child); cid_count = acpi_isa_get_compatid(child, cids, 8); while (ids && ids->ip_id) { if (lid == ids->ip_id) { result = 0; goto out; } for (i = 0; i < cid_count; i++) { if (cids[i] == ids->ip_id) { result = 0; goto out; } } ids++; } out: if (result == 0 && ids->ip_desc) device_set_desc(child, ids->ip_desc); return_VALUE (result); } /* * Look for a MCFG table. If it is present, use the settings for * domain (segment) 0 to setup PCI config space access via the memory * map. * * On non-x86 architectures (arm64 for now), this will be done from the * PCI host bridge driver. */ static void acpi_enable_pcie(void) { #if defined(__i386__) || defined(__amd64__) ACPI_TABLE_HEADER *hdr; ACPI_MCFG_ALLOCATION *alloc, *end; ACPI_STATUS status; status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr); if (ACPI_FAILURE(status)) return; end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length); alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1); while (alloc < end) { if (alloc->PciSegment == 0) { pcie_cfgregopen(alloc->Address, alloc->StartBusNumber, alloc->EndBusNumber); return; } alloc++; } #endif } static void acpi_platform_osc(device_t dev) { ACPI_HANDLE sb_handle; ACPI_STATUS status; uint32_t cap_set[2]; /* 0811B06E-4A27-44F9-8D60-3CBBC22E7B48 */ static uint8_t acpi_platform_uuid[ACPI_UUID_LENGTH] = { 0x6e, 0xb0, 0x11, 0x08, 0x27, 0x4a, 0xf9, 0x44, 0x8d, 0x60, 0x3c, 0xbb, 0xc2, 0x2e, 0x7b, 0x48 }; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) return; cap_set[1] = 0x10; /* APEI Support */ status = acpi_EvaluateOSC(sb_handle, acpi_platform_uuid, 1, nitems(cap_set), cap_set, cap_set, false); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) return; device_printf(dev, "_OSC failed: %s\n", AcpiFormatException(status)); return; } } /* * Scan all of the ACPI namespace and attach child devices. * * We should only expect to find devices in the \_PR, \_TZ, \_SI, and * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec. * However, in violation of the spec, some systems place their PCI link * devices in \, so we have to walk the whole namespace. We check the * type of namespace nodes, so this should be ok. */ static void acpi_probe_children(device_t bus) { ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * Scan the namespace and insert placeholders for all the devices that * we find. We also probe/attach any early devices. * * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because * we want to create nodes for all devices, not just those that are * currently present. (This assumes that we don't want to create/remove * devices as they appear, which might be smarter.) */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n")); AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child, NULL, bus, NULL); /* Pre-allocate resources for our rman from any sysresource devices. */ acpi_sysres_alloc(bus); /* Reserve resources already allocated to children. */ acpi_reserve_resources(bus); /* Create any static children by calling device identify methods. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n")); bus_generic_probe(bus); /* Probe/attach all children, created statically and from the namespace. */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n")); bus_generic_attach(bus); /* Attach wake sysctls. 
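* (acpi_wake_sysctl_walk() adds a wake sysctl for each child with _PRW;
* it typically appears as dev.<driver>.<unit>.wake and can be set with
* sysctl(8) to control per-device wake from sleep.)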
*/ acpi_wake_sysctl_walk(bus); ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n")); return_VOID; } /* * Determine the probe order for a given device. */ static void acpi_probe_order(ACPI_HANDLE handle, int *order) { ACPI_OBJECT_TYPE type; /* * 0. CPUs * 1. I/O port and memory system resource holders * 2. Clocks and timers (to handle early accesses) * 3. Embedded controllers (to handle early accesses) * 4. PCI Link Devices */ AcpiGetType(handle, &type); if (type == ACPI_TYPE_PROCESSOR) *order = 0; else if (acpi_MatchHid(handle, "PNP0C01") || acpi_MatchHid(handle, "PNP0C02")) *order = 1; else if (acpi_MatchHid(handle, "PNP0100") || acpi_MatchHid(handle, "PNP0103") || acpi_MatchHid(handle, "PNP0B00")) *order = 2; else if (acpi_MatchHid(handle, "PNP0C09")) *order = 3; else if (acpi_MatchHid(handle, "PNP0C0F")) *order = 4; } /* * Evaluate a child device and determine whether we might attach a device to * it. */ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_DEVICE_INFO *devinfo; struct acpi_device *ad; struct acpi_prw_data prw; ACPI_OBJECT_TYPE type; ACPI_HANDLE h; device_t bus, child; char *handle_str; int order; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (acpi_disabled("children")) return_ACPI_STATUS (AE_OK); /* Skip this device if we think we'll have trouble with it. */ if (acpi_avoid(handle)) return_ACPI_STATUS (AE_OK); bus = (device_t)context; if (ACPI_SUCCESS(AcpiGetType(handle, &type))) { handle_str = acpi_name(handle); switch (type) { case ACPI_TYPE_DEVICE: /* * Since we scan from \, be sure to skip system scope objects. * \_SB_ and \_TZ_ are defined in ACPICA as devices to work around * BIOS bugs. For example, \_SB_ is to allow \_SB_._INI to be run * during the initialization and \_TZ_ is to support Notify() on it. */ if (strcmp(handle_str, "\\_SB_") == 0 || strcmp(handle_str, "\\_TZ_") == 0) break; if (acpi_parse_prw(handle, &prw) == 0) AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit); /* * Ignore devices that do not have a _HID or _CID. They should * be discovered by other buses (e.g. the PCI bus driver). */ if (!acpi_has_hid(handle)) break; /* FALLTHROUGH */ case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: case ACPI_TYPE_POWER: /* * Create a placeholder device for this node. Sort the * placeholder so that the probe/attach passes will run * breadth-first. Orders less than ACPI_DEV_BASE_ORDER * are reserved for special objects (i.e., system * resources). */ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str)); order = level * 10 + ACPI_DEV_BASE_ORDER; acpi_probe_order(handle, &order); child = BUS_ADD_CHILD(bus, order, NULL, -1); if (child == NULL) break; /* Associate the handle with the device_t and vice versa. */ acpi_set_handle(child, handle); AcpiAttachData(handle, acpi_fake_objhandler, child); /* * Check that the device is present. If it's not present, * leave it disabled (so that we have a device_t attached to * the handle, but we don't probe it). * * XXX PCI link devices sometimes report "present" but not * "functional" (i.e. if disabled). Go ahead and probe them * anyway since we may enable them later. */ if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) { /* Never disable PCI link devices. */ if (acpi_MatchHid(handle, "PNP0C0F")) break; /* * Docking stations should remain enabled since the system * may be undocked at boot. 
*/ if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h))) break; device_disable(child); break; } /* * Get the device's resource settings and attach them. * Note that if the device has _PRS but no _CRS, we need * to decide when it's appropriate to try to configure the * device. Ignore the return value here; it's OK for the * device not to have any resources. */ acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL); ad = device_get_ivars(child); ad->ad_cls_class = 0xffffff; if (ACPI_SUCCESS(AcpiGetObjectInfo(handle, &devinfo))) { if ((devinfo->Valid & ACPI_VALID_CLS) != 0 && devinfo->ClassCode.Length >= ACPI_PCICLS_STRING_SIZE) { ad->ad_cls_class = strtoul(devinfo->ClassCode.String, NULL, 16); } AcpiOsFree(devinfo); } break; } } return_ACPI_STATUS (AE_OK); } /* * AcpiAttachData() requires an object handler but never uses it. This is a * placeholder object handler so we can store a device_t in an ACPI_HANDLE. */ void acpi_fake_objhandler(ACPI_HANDLE h, void *data) { } static void acpi_shutdown_final(void *arg, int howto) { struct acpi_softc *sc = (struct acpi_softc *)arg; register_t intr; ACPI_STATUS status; /* * XXX Shutdown code should only run on the BSP (cpuid 0). * Some chipsets do not power off the system correctly if called from * an AP. */ if ((howto & RB_POWEROFF) != 0) { status = AcpiEnterSleepStatePrep(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); return; } device_printf(sc->acpi_dev, "Powering system off\n"); intr = intr_disable(); status = AcpiEnterSleepState(ACPI_STATE_S5); if (ACPI_FAILURE(status)) { intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - %s\n", AcpiFormatException(status)); } else { DELAY(1000000); intr_restore(intr); device_printf(sc->acpi_dev, "power-off failed - timeout\n"); } } else if ((howto & RB_HALT) == 0 && sc->acpi_handle_reboot) { /* Reboot using the reset register. */ status = AcpiReset(); if (ACPI_SUCCESS(status)) { DELAY(1000000); device_printf(sc->acpi_dev, "reset failed - timeout\n"); } else if (status != AE_NOT_EXIST) device_printf(sc->acpi_dev, "reset failed - %s\n", AcpiFormatException(status)); } else if (sc->acpi_do_disable && !KERNEL_PANICKED()) { /* * Only disable ACPI if the user requested. On some systems, writing * the disable value to SMI_CMD hangs the system. */ device_printf(sc->acpi_dev, "Shutting down\n"); AcpiTerminate(); } } static void acpi_enable_fixed_events(struct acpi_softc *sc) { static int first_time = 1; /* Enable and clear fixed events and install handlers. */ if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON, acpi_event_power_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Power Button (fixed)\n"); } if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) { AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON); AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON, acpi_event_sleep_button_sleep, sc); if (first_time) device_printf(sc->acpi_dev, "Sleep Button (fixed)\n"); } first_time = 0; } /* * Returns true if the device is actually present and should * be attached to. This requires the present, enabled, UI-visible * and diagnostics-passed bits to be set. 
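 *
 * For reference (from the ACPI specification's definition of _STA, not
 * from this commit), the _STA return value is a bit field:
 *
 *	bit 0	the device is present
 *	bit 1	the device is enabled and decoding its resources
 *	bit 2	the device should be shown in the UI
 *	bit 3	the device is functioning properly (passed diagnostics)
 *	bit 4	a battery is present (battery devices only)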
*/ BOOLEAN acpi_DeviceIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); #ifdef ACPI_EARLY_EPYC_WAR /* * Certain Threadripper boards always return 0 from _STA for FreeBSD * because the method only returns non-zero for the OS string "Windows 2015". * Force these devices to always be treated as present. * Beta versions were worse: they always returned 0. */ if (acpi_MatchHid(h, "AMDI0020") || acpi_MatchHid(h, "AMDI0010")) return (TRUE); #endif status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_DEVICE_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if the battery is actually present and inserted. */ BOOLEAN acpi_BatteryIsPresent(device_t dev) { ACPI_HANDLE h; UINT32 s; ACPI_STATUS status; h = acpi_get_handle(dev); if (h == NULL) return (FALSE); status = acpi_GetInteger(h, "_STA", &s); /* * If no _STA method or if it failed, then assume that * the device is present. */ if (ACPI_FAILURE(status)) return (TRUE); return (ACPI_BATTERY_PRESENT(s) ? TRUE : FALSE); } /* * Returns true if a device has at least one valid device ID. */ BOOLEAN acpi_has_hid(ACPI_HANDLE h) { ACPI_DEVICE_INFO *devinfo; BOOLEAN ret; if (h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (FALSE); ret = FALSE; if ((devinfo->Valid & ACPI_VALID_HID) != 0) ret = TRUE; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) if (devinfo->CompatibleIdList.Count > 0) ret = TRUE; AcpiOsFree(devinfo); return (ret); } /* * Match a HID string against a handle. * Returns ACPI_MATCHHID_HID on a _HID match, * ACPI_MATCHHID_CID on a _CID match when the _HID does not match, and * ACPI_MATCHHID_NOMATCH (0) if neither matches. */ int acpi_MatchHid(ACPI_HANDLE h, const char *hid) { ACPI_DEVICE_INFO *devinfo; int ret; int i; if (hid == NULL || h == NULL || ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo))) return (ACPI_MATCHHID_NOMATCH); ret = ACPI_MATCHHID_NOMATCH; if ((devinfo->Valid & ACPI_VALID_HID) != 0 && strcmp(hid, devinfo->HardwareId.String) == 0) ret = ACPI_MATCHHID_HID; else if ((devinfo->Valid & ACPI_VALID_CID) != 0) for (i = 0; i < devinfo->CompatibleIdList.Count; i++) { if (strcmp(hid, devinfo->CompatibleIdList.Ids[i].String) == 0) { ret = ACPI_MATCHHID_CID; break; } } AcpiOsFree(devinfo); return (ret); } /* * Return the handle of a named object within our scope, i.e., that of (parent) * or one of its parents. */ ACPI_STATUS acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result) { ACPI_HANDLE r; ACPI_STATUS status; /* Walk back up the tree to the root */ for (;;) { status = AcpiGetHandle(parent, path, &r); if (ACPI_SUCCESS(status)) { *result = r; return (AE_OK); } /* XXX Return error here? */ if (status != AE_NOT_FOUND) return (AE_OK); if (ACPI_FAILURE(AcpiGetParent(parent, &r))) return (AE_NOT_FOUND); parent = r; } } /* * Allocate a buffer with a preset data size. */ ACPI_BUFFER * acpi_AllocBuffer(int size) { ACPI_BUFFER *buf; if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL) return (NULL); buf->Length = size; buf->Pointer = (void *)(buf + 1); return (buf); } ACPI_STATUS acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number) { ACPI_OBJECT arg1; ACPI_OBJECT_LIST args; arg1.Type = ACPI_TYPE_INTEGER; arg1.Integer.Value = number; args.Count = 1; args.Pointer = &arg1; return (AcpiEvaluateObject(handle, path, &args, NULL)); } /* * Evaluate a path that should return an integer.
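 *
 * A minimal usage sketch (illustrative only; "handle" and "dev" are
 * assumed to come from the caller's context):
 *
 *	UINT32 adr;
 *
 *	if (ACPI_SUCCESS(acpi_GetInteger(handle, "_ADR", &adr)))
 *		device_printf(dev, "_ADR is %#x\n", adr);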
*/ ACPI_STATUS acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number) { ACPI_STATUS status; ACPI_BUFFER buf; ACPI_OBJECT param; if (handle == NULL) handle = ACPI_ROOT_OBJECT; /* * Assume that what we've been pointed at is an Integer object, or * a method that will return an Integer. */ buf.Pointer = &param; buf.Length = sizeof(param); status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) { if (param.Type == ACPI_TYPE_INTEGER) *number = param.Integer.Value; else status = AE_TYPE; } /* * In some applications, a method that's expected to return an Integer * may instead return a Buffer (probably to simplify some internal * arithmetic). We'll try to fetch whatever it is, and if it's a Buffer, * convert it into an Integer as best we can. * * This is a hack. */ if (status == AE_BUFFER_OVERFLOW) { if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) { status = AE_NO_MEMORY; } else { status = AcpiEvaluateObject(handle, path, NULL, &buf); if (ACPI_SUCCESS(status)) status = acpi_ConvertBufferToInteger(&buf, number); AcpiOsFree(buf.Pointer); } } return (status); } ACPI_STATUS acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number) { ACPI_OBJECT *p; UINT8 *val; int i; p = (ACPI_OBJECT *)bufp->Pointer; if (p->Type == ACPI_TYPE_INTEGER) { *number = p->Integer.Value; return (AE_OK); } if (p->Type != ACPI_TYPE_BUFFER) return (AE_TYPE); if (p->Buffer.Length > sizeof(int)) return (AE_BAD_DATA); *number = 0; val = p->Buffer.Pointer; for (i = 0; i < p->Buffer.Length; i++) *number += val[i] << (i * 8); return (AE_OK); } /* * Iterate over the elements of a package object, calling the supplied * function for each element. * * XXX possible enhancement might be to abort traversal on error. */ ACPI_STATUS acpi_ForeachPackageObject(ACPI_OBJECT *pkg, void (*func)(ACPI_OBJECT *comp, void *arg), void *arg) { ACPI_OBJECT *comp; int i; if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE) return (AE_BAD_PARAMETER); /* Iterate over components */ i = 0; comp = pkg->Package.Elements; for (; i < pkg->Package.Count; i++, comp++) func(comp, arg); return (AE_OK); } /* * Find the (index)th resource object in a set. */ ACPI_STATUS acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp) { ACPI_RESOURCE *rp; int i; rp = (ACPI_RESOURCE *)buf->Pointer; i = index; while (i-- > 0) { /* Range check */ if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); /* Check for terminator */ if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) return (AE_NOT_FOUND); rp = ACPI_NEXT_RESOURCE(rp); } if (resp != NULL) *resp = rp; return (AE_OK); } /* * Append an ACPI_RESOURCE to an ACPI_BUFFER. * * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER * provided to contain it. If the ACPI_BUFFER is empty, allocate a sensible * backing block. If the ACPI_RESOURCE is NULL, return an empty set of * resources. */ #define ACPI_INITIAL_RESOURCE_BUFFER_SIZE 512 ACPI_STATUS acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res) { ACPI_RESOURCE *rp; void *newp; /* Initialise the buffer if necessary. */ if (buf->Pointer == NULL) { buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE; if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL) return (AE_NO_MEMORY); rp = (ACPI_RESOURCE *)buf->Pointer; rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; } if (res == NULL) return (AE_OK); /* * Scan the current buffer looking for the terminator.
* This will either find the terminator or hit the end * of the buffer and return an error. */ rp = (ACPI_RESOURCE *)buf->Pointer; for (;;) { /* Range check, don't go outside the buffer */ if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length)) return (AE_BAD_PARAMETER); if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0) break; rp = ACPI_NEXT_RESOURCE(rp); } /* * Check the size of the buffer and expand if required. * * Required size is: * size of existing resources before terminator + * size of new resource and header + * size of terminator. * * Note that this loop should really only run once, unless * for some reason we are stuffing a *really* huge resource. */ while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) + res->Length + ACPI_RS_SIZE_NO_DATA + ACPI_RS_SIZE_MIN) >= buf->Length) { if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL) return (AE_NO_MEMORY); bcopy(buf->Pointer, newp, buf->Length); rp = (ACPI_RESOURCE *)((u_int8_t *)newp + ((u_int8_t *)rp - (u_int8_t *)buf->Pointer)); AcpiOsFree(buf->Pointer); buf->Pointer = newp; buf->Length += buf->Length; } /* Insert the new resource. */ bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA); /* And add the terminator. */ rp = ACPI_NEXT_RESOURCE(rp); rp->Type = ACPI_RESOURCE_TYPE_END_TAG; rp->Length = ACPI_RS_SIZE_MIN; return (AE_OK); } UINT64 acpi_DSMQuery(ACPI_HANDLE h, const uint8_t *uuid, int revision) { /* * ACPI spec 9.1.1 defines this. * * "Arg2: Function Index Represents a specific function whose meaning is * specific to the UUID and Revision ID. Function indices should start * with 1. Function number zero is a query function (see the special * return code defined below)." */ ACPI_BUFFER buf; ACPI_OBJECT *obj; UINT64 ret = 0; int i; if (!ACPI_SUCCESS(acpi_EvaluateDSM(h, uuid, revision, 0, NULL, &buf))) { ACPI_INFO(("Failed to enumerate DSM functions\n")); return (0); } obj = (ACPI_OBJECT *)buf.Pointer; KASSERT(obj, ("Object not allowed to be NULL\n")); /* * From ACPI 6.2 spec 9.1.1: * If Function Index = 0, a Buffer containing a function index bitfield. * Otherwise, the return value and type depend on the UUID and revision * ID (see below). */ switch (obj->Type) { case ACPI_TYPE_BUFFER: for (i = 0; i < MIN(obj->Buffer.Length, sizeof(ret)); i++) ret |= (((uint64_t)obj->Buffer.Pointer[i]) << (i * 8)); break; case ACPI_TYPE_INTEGER: ACPI_BIOS_WARNING((AE_INFO, "Possibly buggy BIOS with ACPI_TYPE_INTEGER for function enumeration\n")); ret = obj->Integer.Value; break; default: ACPI_WARNING((AE_INFO, "Unexpected return type %u\n", obj->Type)); } AcpiOsFree(obj); return (ret); } /* * DSM may return multiple types depending on the function. It is therefore * unsafe to use the typed evaluation. It is highly recommended that the caller * check the type of the returned object.
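 *
 * A hedged sketch of the expected calling pattern (the uuid, revision,
 * function index and destination buffer are illustrative, not from
 * this commit):
 *
 *	ACPI_BUFFER buf;
 *	ACPI_OBJECT *obj;
 *
 *	if ((acpi_DSMQuery(h, uuid, 1) & (1ULL << 4)) == 0)
 *		return (ENXIO);
 *	if (ACPI_FAILURE(acpi_EvaluateDSM(h, uuid, 1, 4, NULL, &buf)))
 *		return (ENXIO);
 *	obj = (ACPI_OBJECT *)buf.Pointer;
 *	if (obj->Type == ACPI_TYPE_BUFFER)
 *		bcopy(obj->Buffer.Pointer, dst,
 *		    MIN(obj->Buffer.Length, sizeof(dst)));
 *	AcpiOsFree(buf.Pointer);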
*/ ACPI_STATUS acpi_EvaluateDSM(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf) { return (acpi_EvaluateDSMTyped(handle, uuid, revision, function, package, out_buf, ACPI_TYPE_ANY)); } ACPI_STATUS acpi_EvaluateDSMTyped(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf, ACPI_OBJECT_TYPE type) { ACPI_OBJECT arg[4]; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; if (out_buf == NULL) return (AE_NO_MEMORY); arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = __DECONST(uint8_t *, uuid); arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = function; if (package) { arg[3] = *package; } else { arg[3].Type = ACPI_TYPE_PACKAGE; arg[3].Package.Count = 0; arg[3].Package.Elements = NULL; } arglist.Pointer = arg; arglist.Count = 4; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_DSM", &arglist, &buf, type); if (ACPI_FAILURE(status)) return (status); KASSERT(ACPI_SUCCESS(status), ("Unexpected status")); *out_buf = buf; return (status); } ACPI_STATUS acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count, uint32_t *caps_in, uint32_t *caps_out, bool query) { ACPI_OBJECT arg[4], *ret; ACPI_OBJECT_LIST arglist; ACPI_BUFFER buf; ACPI_STATUS status; arglist.Pointer = arg; arglist.Count = 4; arg[0].Type = ACPI_TYPE_BUFFER; arg[0].Buffer.Length = ACPI_UUID_LENGTH; arg[0].Buffer.Pointer = uuid; arg[1].Type = ACPI_TYPE_INTEGER; arg[1].Integer.Value = revision; arg[2].Type = ACPI_TYPE_INTEGER; arg[2].Integer.Value = count; arg[3].Type = ACPI_TYPE_BUFFER; arg[3].Buffer.Length = count * sizeof(*caps_in); arg[3].Buffer.Pointer = (uint8_t *)caps_in; caps_in[0] = query ? 1 : 0; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObjectTyped(handle, "_OSC", &arglist, &buf, ACPI_TYPE_BUFFER); if (ACPI_FAILURE(status)) return (status); if (caps_out != NULL) { ret = buf.Pointer; if (ret->Buffer.Length != count * sizeof(*caps_out)) { AcpiOsFree(buf.Pointer); return (AE_BUFFER_OVERFLOW); } bcopy(ret->Buffer.Pointer, caps_out, ret->Buffer.Length); } AcpiOsFree(buf.Pointer); return (status); } /* * Set interrupt model. */ ACPI_STATUS acpi_SetIntrModel(int model) { return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model)); } /* * Walk subtables of a table and call a callback routine for each * subtable. The caller should provide the first subtable and a * pointer to the end of the table. This can be used to walk tables * such as MADT and SRAT that use subtable entries. */ void acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler, void *arg) { ACPI_SUBTABLE_HEADER *entry; for (entry = first; (void *)entry < end; ) { /* Avoid an infinite loop if we hit a bogus entry. */ if (entry->Length < sizeof(ACPI_SUBTABLE_HEADER)) return; handler(entry, arg); entry = ACPI_ADD_PTR(ACPI_SUBTABLE_HEADER, entry, entry->Length); } } /* * DEPRECATED. This interface has serious deficiencies and will be * removed. * * Immediately enter the sleep state. In the old model, acpiconf(8) ran * rc.suspend and rc.resume so we don't have to notify devd(8) to do this. 
*/ ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state) { static int once; if (!once) { device_printf(sc->acpi_dev, "warning: acpi_SetSleepState() deprecated, need to update your software\n"); once = 1; } return (acpi_EnterSleepState(sc, state)); } #if defined(__amd64__) || defined(__i386__) static void acpi_sleep_force_task(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) device_printf(sc->acpi_dev, "force sleep state S%d failed\n", sc->acpi_next_sstate); } static void acpi_sleep_force(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; device_printf(sc->acpi_dev, "suspend request timed out, forcing sleep now\n"); /* * XXX Suspending from callout causes freezes in DEVICE_SUSPEND(). * Suspend from acpi_task thread instead. */ if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_sleep_force_task, sc))) device_printf(sc->acpi_dev, "AcpiOsExecute() for sleeping failed\n"); } #endif /* * Request that the system enter the given suspend state. All /dev/apm * devices and devd(8) will be notified. Userland then has a chance to * save state and acknowledge the request. The system sleeps once all * acks are in. */ int acpi_ReqSleepState(struct acpi_softc *sc, int state) { #if defined(__amd64__) || defined(__i386__) struct apm_clone_data *clone; ACPI_STATUS status; if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); /* * If a reboot/shutdown/suspend request is already in progress or * suspend is blocked due to an upcoming shutdown, just return. */ if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) { return (0); } /* Wait until sleep is enabled. */ while (sc->acpi_sleep_disabled) { AcpiOsSleep(1000); } ACPI_LOCK(acpi); sc->acpi_next_sstate = state; /* S5 (soft-off) should be entered directly with no waiting. */ if (state == ACPI_STATE_S5) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* Record the pending state and notify all apm devices. */ STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { clone->notify_status = APM_EV_NONE; if ((clone->flags & ACPI_EVF_DEVD) == 0) { selwakeuppri(&clone->sel_read, PZERO); KNOTE_LOCKED(&clone->sel_read.si_note, 0); } } /* If devd(8) is not running, immediately enter the sleep state. */ if (!devctl_process_running()) { ACPI_UNLOCK(acpi); status = acpi_EnterSleepState(sc, state); return (ACPI_SUCCESS(status) ? 0 : ENXIO); } /* * Set a timeout to fire if userland doesn't ack the suspend request * in time. This way we still eventually go to sleep if we were * overheating or running low on battery, even if userland is hung. * We cancel this timeout once all userland acks are in or the * suspend request is aborted. */ callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc); ACPI_UNLOCK(acpi); /* Now notify devd(8) also. */ acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state); return (0); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } /* * Acknowledge (or reject) a pending sleep state. The caller has * prepared for suspend and is now ready for it to proceed. If the * error argument is non-zero, it indicates suspend should be cancelled * and gives an errno value describing why. Once all votes are in, * we suspend the system. 
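 *
 * (Illustrative aside, not from this commit: in the usual flow devd(8)
 * receives the "Suspend" notification sent by acpi_ReqSleepState()
 * above, runs the suspend scripts, and then acknowledges with
 * something like
 *
 *	# acpiconf -k 0
 *
 * where 0 votes "proceed" and a non-zero errno value cancels the
 * pending suspend, as described for the error argument below.)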
*/ int acpi_AckSleepState(struct apm_clone_data *clone, int error) { #if defined(__amd64__) || defined(__i386__) struct acpi_softc *sc; int ret, sleeping; /* If no pending sleep state, return an error. */ ACPI_LOCK(acpi); sc = clone->acpi_sc; if (sc->acpi_next_sstate == 0) { ACPI_UNLOCK(acpi); return (ENXIO); } /* Caller wants to abort suspend process. */ if (error) { sc->acpi_next_sstate = 0; callout_stop(&sc->susp_force_to); device_printf(sc->acpi_dev, "listener on %s cancelled the pending suspend\n", devtoname(clone->cdev)); ACPI_UNLOCK(acpi); return (0); } /* * Mark this device as acking the suspend request. Then, walk through * all devices, seeing if they agree yet. We only count devices that * are writable since read-only devices couldn't ack the request. */ sleeping = TRUE; clone->notify_status = APM_EV_ACKED; STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) { if ((clone->flags & ACPI_EVF_WRITE) != 0 && clone->notify_status != APM_EV_ACKED) { sleeping = FALSE; break; } } /* If all devices have voted "yes", we will suspend now. */ if (sleeping) callout_stop(&sc->susp_force_to); ACPI_UNLOCK(acpi); ret = 0; if (sleeping) { if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) ret = ENODEV; } return (ret); #else /* This platform does not support acpi suspend/resume. */ return (EOPNOTSUPP); #endif } static void acpi_sleep_enable(void *arg) { struct acpi_softc *sc = (struct acpi_softc *)arg; ACPI_LOCK_ASSERT(acpi); /* Reschedule if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) { callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); return; } sc->acpi_sleep_disabled = FALSE; } static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc) { ACPI_STATUS status; /* Fail if the system is not fully up and running. */ if (!AcpiGbl_SystemAwakeAndRunning) return (AE_ERROR); ACPI_LOCK(acpi); status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK; sc->acpi_sleep_disabled = TRUE; ACPI_UNLOCK(acpi); return (status); } enum acpi_sleep_state { ACPI_SS_NONE, ACPI_SS_GPE_SET, ACPI_SS_DEV_SUSPEND, ACPI_SS_SLP_PREP, ACPI_SS_SLEPT, }; /* * Enter the desired system sleep state. * * Currently we support S1-S5 but S4 is only S4BIOS */ static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state) { register_t intr; ACPI_STATUS status; ACPI_EVENT_STATUS power_button_status; enum acpi_sleep_state slp_state; int sleep_result; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX) return_ACPI_STATUS (AE_BAD_PARAMETER); if (!acpi_sleep_states[state]) { device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n", state); return (AE_SUPPORT); } /* Re-entry once we're suspending is not allowed. */ status = acpi_sleep_disable(sc); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "suspend request ignored (not ready yet)\n"); return (status); } if (state == ACPI_STATE_S5) { /* * Shut down cleanly and power off. This will call us back through the * shutdown handlers. */ shutdown_nice(RB_POWEROFF); return_ACPI_STATUS (AE_OK); } EVENTHANDLER_INVOKE(power_suspend_early); stop_all_proc(); suspend_all_fs(); EVENTHANDLER_INVOKE(power_suspend); #ifdef EARLY_AP_STARTUP MPASS(mp_ncpus == 1 || smp_started); thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_bind(curthread, 0); thread_unlock(curthread); } #endif /* * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE * drivers need this. 
*/ mtx_lock(&Giant); slp_state = ACPI_SS_NONE; sc->acpi_sstate = state; /* Enable any GPEs as appropriate and requested by the user. */ acpi_wake_prep_walk(state); slp_state = ACPI_SS_GPE_SET; /* * Inform all devices that we are going to sleep. If at least one * device fails, DEVICE_SUSPEND() automatically resumes the tree. * * XXX A two-pass approach with a 'veto' pass followed by a * "real thing" pass would be better, but the current * bus interface does not provide for this. */ if (DEVICE_SUSPEND(root_bus) != 0) { device_printf(sc->acpi_dev, "device_suspend failed\n"); goto backout; } slp_state = ACPI_SS_DEV_SUSPEND; status = AcpiEnterSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n", AcpiFormatException(status)); goto backout; } slp_state = ACPI_SS_SLP_PREP; if (sc->acpi_sleep_delay > 0) DELAY(sc->acpi_sleep_delay * 1000000); suspendclock(); intr = intr_disable(); if (state != ACPI_STATE_S1) { sleep_result = acpi_sleep_machdep(sc, state); acpi_wakeup_machdep(sc, state, sleep_result, 0); /* * XXX According to the ACPI specification, the SCI_EN bit should be * restored by the ACPI platform (BIOS, firmware) to its pre-sleep state. * Unfortunately some BIOSes fail to do that and that leads to * unexpected and serious consequences during wakeup, like a system * getting stuck in SMI handlers. * This hack is picked up from Linux, which claims that it follows * Windows behavior. */ if (sleep_result == 1 && state != ACPI_STATE_S4) AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT); if (sleep_result == 1 && state == ACPI_STATE_S3) { /* * Prevent mis-interpretation of the wakeup by the power button * as a request for power off. * Ideally we should post an appropriate wakeup event, * perhaps using acpi_event_power_button_wake or the like. * * Clearing of the power button status after wakeup is mandated * by the ACPI specification in the section "Fixed Power Button". * * XXX As of ACPICA 20121114 AcpiGetEventStatus provides * status as 0/1 corresponding to inactive/active despite * its type being ACPI_EVENT_STATUS. In other words, * we should not test for ACPI_EVENT_FLAG_SET for the time being. */ if (ACPI_SUCCESS(AcpiGetEventStatus(ACPI_EVENT_POWER_BUTTON, &power_button_status)) && power_button_status != 0) { AcpiClearEvent(ACPI_EVENT_POWER_BUTTON); device_printf(sc->acpi_dev, "cleared fixed power button status\n"); } } intr_restore(intr); /* call acpi_wakeup_machdep() again with interrupts enabled */ acpi_wakeup_machdep(sc, state, sleep_result, 1); AcpiLeaveSleepStatePrep(state); if (sleep_result == -1) goto backout; /* Re-enable ACPI hardware on wakeup from sleep state 4. */ if (state == ACPI_STATE_S4) AcpiEnable(); } else { status = AcpiEnterSleepState(state); intr_restore(intr); AcpiLeaveSleepStatePrep(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", AcpiFormatException(status)); goto backout; } } slp_state = ACPI_SS_SLEPT; /* * Back out state according to how far along we got in the suspend * process. This handles both the error and success cases. */ backout: if (slp_state >= ACPI_SS_SLP_PREP) resumeclock(); if (slp_state >= ACPI_SS_GPE_SET) { acpi_wake_prep_walk(state); sc->acpi_sstate = ACPI_STATE_S0; } if (slp_state >= ACPI_SS_DEV_SUSPEND) DEVICE_RESUME(root_bus); if (slp_state >= ACPI_SS_SLP_PREP) AcpiLeaveSleepState(state); if (slp_state >= ACPI_SS_SLEPT) { #if defined(__i386__) || defined(__amd64__) /* NB: we are still using the ACPI timecounter at this point.
*/ resume_TSC(); #endif acpi_resync_clock(sc); acpi_enable_fixed_events(sc); } sc->acpi_next_sstate = 0; mtx_unlock(&Giant); #ifdef EARLY_AP_STARTUP thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); #else if (smp_started) { thread_lock(curthread); sched_unbind(curthread); thread_unlock(curthread); } #endif resume_all_fs(); resume_all_proc(); EVENTHANDLER_INVOKE(power_resume); /* Allow another sleep request after a while. */ callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME); /* Run /etc/rc.resume after we are back. */ if (devctl_process_running()) acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state); return_ACPI_STATUS (status); } static void acpi_resync_clock(struct acpi_softc *sc) { /* * Warm up timecounter again and reset system clock. */ (void)timecounter->tc_get_timecount(timecounter); inittodr(time_second + sc->acpi_sleep_delay); } /* Enable or disable the device's wake GPE. */ int acpi_wake_set_enable(device_t dev, int enable) { struct acpi_prw_data prw; ACPI_STATUS status; int flags; /* Make sure the device supports waking the system and get the GPE. */ if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0) return (ENXIO); flags = acpi_get_flags(dev); if (enable) { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "enable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED); } else { status = AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (ACPI_FAILURE(status)) { device_printf(dev, "disable wake failed\n"); return (ENXIO); } acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED); } return (0); } static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* Check that this is a wake-capable device and get its GPE. */ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); /* * The destination sleep state must be less than (i.e., higher power) * or equal to the value specified by _PRW. If this GPE cannot be * enabled for the next sleep state, then disable it. If it can and * the user requested it be enabled, turn on any required power resources * and set _PSW. */ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE); if (bootverbose) device_printf(dev, "wake_prep disabled wake for %s (S%d)\n", acpi_name(handle), sstate); } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) { acpi_pwr_wake_enable(handle, 1); acpi_SetInteger(handle, "_PSW", 1); if (bootverbose) device_printf(dev, "wake_prep enabled for %s (S%d)\n", acpi_name(handle), sstate); } return (0); } static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate) { struct acpi_prw_data prw; device_t dev; /* * Check that this is a wake-capable device and get its GPE. Return * now if the user didn't enable this device for wake. */ if (acpi_parse_prw(handle, &prw) != 0) return (ENXIO); dev = acpi_get_device(handle); if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0) return (0); /* * If this GPE couldn't be enabled for the previous sleep state, it was * disabled before going to sleep so re-enable it. If it was enabled, * clear _PSW and turn off any power resources it used. 
*/ if (sstate > prw.lowest_wake) { AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_ENABLE); if (bootverbose) device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle)); } else { acpi_SetInteger(handle, "_PSW", 0); acpi_pwr_wake_enable(handle, 0); if (bootverbose) device_printf(dev, "run_prep cleaned up for %s\n", acpi_name(handle)); } return (0); } static ACPI_STATUS acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { int sstate; /* If suspending, run the sleep prep function, otherwise wake. */ sstate = *(int *)context; if (AcpiGbl_SystemAwakeAndRunning) acpi_wake_sleep_prep(handle, sstate); else acpi_wake_run_prep(handle, sstate); return (AE_OK); } /* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */ static int acpi_wake_prep_walk(int sstate) { ACPI_HANDLE sb_handle; if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle))) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100, acpi_wake_prep, NULL, &sstate, NULL); return (0); } /* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */ static int acpi_wake_sysctl_walk(device_t dev) { int error, i, numdevs; device_t *devlist; device_t child; ACPI_STATUS status; error = device_get_children(dev, &devlist, &numdevs); if (error != 0 || numdevs == 0) { if (numdevs == 0) free(devlist, M_TEMP); return (error); } for (i = 0; i < numdevs; i++) { child = devlist[i]; acpi_wake_sysctl_walk(child); if (!device_is_attached(child)) continue; status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL); if (ACPI_SUCCESS(status)) { SYSCTL_ADD_PROC(device_get_sysctl_ctx(child), SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO, "wake", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, child, 0, acpi_wake_set_sysctl, "I", "Device set to wake the system"); } } free(devlist, M_TEMP); return (0); } /* Enable or disable wake from userland. */ static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS) { int enable, error; device_t dev; dev = (device_t)arg1; enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0; error = sysctl_handle_int(oidp, &enable, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (enable != 0 && enable != 1) return (EINVAL); return (acpi_wake_set_enable(dev, enable)); } /* Parse a device's _PRW into a structure. */ int acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw) { ACPI_STATUS status; ACPI_BUFFER prw_buffer; ACPI_OBJECT *res, *res2; int error, i, power_count; if (h == NULL || prw == NULL) return (EINVAL); /* * The _PRW object (7.2.9) is only required for devices that have the * ability to wake the system from a sleeping state. */ error = EINVAL; prw_buffer.Pointer = NULL; prw_buffer.Length = ACPI_ALLOCATE_BUFFER; status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer); if (ACPI_FAILURE(status)) return (ENOENT); res = (ACPI_OBJECT *)prw_buffer.Pointer; if (res == NULL) return (ENOENT); if (!ACPI_PKG_VALID(res, 2)) goto out; /* * Element 1 of the _PRW object: * The lowest power system sleeping state that can be entered while still * providing wake functionality. The sleeping state being entered must * be less than (i.e., higher power) or equal to this value. 
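 *
 * For example (illustrative ASL, not from this commit), a device that
 * can wake the system from S3 or any shallower state via GPE bit 0x0B
 * might declare:
 *
 *	Name (_PRW, Package () { 0x0B, 0x03 })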
*/ if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0) goto out; /* * Element 0 of the _PRW object: */ switch (res->Package.Elements[0].Type) { case ACPI_TYPE_INTEGER: /* * If the data type of this package element is numeric, then this * _PRW package element is the bit index in the GPEx_EN, in the * GPE blocks described in the FADT, of the enable bit that is * enabled for the wake event. */ prw->gpe_handle = NULL; prw->gpe_bit = res->Package.Elements[0].Integer.Value; error = 0; break; case ACPI_TYPE_PACKAGE: /* * If the data type of this package element is a package, then this * _PRW package element is itself a package containing two * elements. The first is an object reference to the GPE Block * device that contains the GPE that will be triggered by the wake * event. The second element is numeric and it contains the bit * index in the GPEx_EN, in the GPE Block referenced by the * first element in the package, of the enable bit that is enabled for * the wake event. * * For example, if this field is a package then it is of the form: * Package() {\_SB.PCI0.ISA.GPE, 2} */ res2 = &res->Package.Elements[0]; if (!ACPI_PKG_VALID(res2, 2)) goto out; prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]); if (prw->gpe_handle == NULL) goto out; if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0) goto out; error = 0; break; default: goto out; } /* Elements 2 to N of the _PRW object are power resources. */ power_count = res->Package.Count - 2; if (power_count > ACPI_PRW_MAX_POWERRES) { printf("ACPI device %s has too many power resources\n", acpi_name(h)); power_count = 0; } prw->power_res_count = power_count; for (i = 0; i < power_count; i++) prw->power_res[i] = res->Package.Elements[i]; out: if (prw_buffer.Pointer != NULL) AcpiOsFree(prw_buffer.Pointer); return (error); } /* * ACPI Event Handlers */ /* System Event Handlers (registered by EVENTHANDLER_REGISTER) */ static void acpi_system_eventhandler_sleep(void *arg, int state) { struct acpi_softc *sc = (struct acpi_softc *)arg; int ret; ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Check if button action is disabled or unknown. */ if (state == ACPI_STATE_UNKNOWN) return; /* Request that the system prepare to enter the given suspend state. */ ret = acpi_ReqSleepState(sc, state); if (ret != 0) device_printf(sc->acpi_dev, "request to enter state S%d failed (err %d)\n", state, ret); return_VOID; } static void acpi_system_eventhandler_wakeup(void *arg, int state) { ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state); /* Currently, nothing to do for wakeup. 
*/ return_VOID; } /* * ACPICA Event Handlers (FixedEvent, also called from button notify handler) */ static void acpi_invoke_sleep_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context); } static void acpi_invoke_wake_eventhandler(void *context) { EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context); } UINT32 acpi_event_power_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_power_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_sleep(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } UINT32 acpi_event_sleep_button_wake(void *context) { struct acpi_softc *sc = (struct acpi_softc *)context; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx))) return_VALUE (ACPI_INTERRUPT_NOT_HANDLED); return_VALUE (ACPI_INTERRUPT_HANDLED); } /* * XXX This static buffer is suboptimal. There is no locking so only * use this for single-threaded callers. */ char * acpi_name(ACPI_HANDLE handle) { ACPI_BUFFER buf; static char data[256]; buf.Length = sizeof(data); buf.Pointer = data; if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf))) return (data); return ("(unknown)"); } /* * Debugging/bug-avoidance. Avoid trying to fetch info on various * parts of the namespace. */ int acpi_avoid(ACPI_HANDLE handle) { char *cp, *env, *np; int len; np = acpi_name(handle); if (*np == '\\') np++; if ((env = kern_getenv("debug.acpi.avoid")) == NULL) return (0); /* Scan the avoid list checking for a match */ cp = env; for (;;) { while (*cp != 0 && isspace(*cp)) cp++; if (*cp == 0) break; len = 0; while (cp[len] != 0 && !isspace(cp[len])) len++; if (!strncmp(cp, np, len)) { freeenv(env); return(1); } cp += len; } freeenv(env); return (0); } /* * Debugging/bug-avoidance. Disable ACPI subsystem components. */ int acpi_disabled(char *subsys) { char *cp, *env; int len; if ((env = kern_getenv("debug.acpi.disabled")) == NULL) return (0); if (strcmp(env, "all") == 0) { freeenv(env); return (1); } /* Scan the disable list, checking for a match. */ cp = env; for (;;) { while (*cp != '\0' && isspace(*cp)) cp++; if (*cp == '\0') break; len = 0; while (cp[len] != '\0' && !isspace(cp[len])) len++; if (strncmp(cp, subsys, len) == 0) { freeenv(env); return (1); } cp += len; } freeenv(env); return (0); } static void acpi_lookup(void *arg, const char *name, device_t *dev) { ACPI_HANDLE handle; if (*dev != NULL) return; /* * Allow any handle name that is specified as an absolute path and * starts with '\'. 
We could restrict this to \_SB and friends, * but see acpi_probe_children() for notes on why we scan the entire * namespace for devices. * * XXX: The pathname argument to AcpiGetHandle() should be fixed to * be const. */ if (name[0] != '\\') return; if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT, __DECONST(char *, name), &handle))) return; *dev = acpi_get_device(handle); } /* * Control interface. * * We multiplex ioctls for all participating ACPI devices here. Individual * drivers wanting to be accessible via /dev/acpi should use the * register/deregister interface to make their handlers visible. */ struct acpi_ioctl_hook { TAILQ_ENTRY(acpi_ioctl_hook) link; u_long cmd; acpi_ioctl_fn fn; void *arg; }; static TAILQ_HEAD(,acpi_ioctl_hook) acpi_ioctl_hooks; static int acpi_ioctl_hooks_initted; int acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg) { struct acpi_ioctl_hook *hp; if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL) return (ENOMEM); hp->cmd = cmd; hp->fn = fn; hp->arg = arg; ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted == 0) { TAILQ_INIT(&acpi_ioctl_hooks); acpi_ioctl_hooks_initted = 1; } TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link); ACPI_UNLOCK(acpi); return (0); } void acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn) { struct acpi_ioctl_hook *hp; ACPI_LOCK(acpi); TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) if (hp->cmd == cmd && hp->fn == fn) break; if (hp != NULL) { TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link); free(hp, M_ACPIDEV); } ACPI_UNLOCK(acpi); } static int acpiopen(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiclose(struct cdev *dev, int flag, int fmt, struct thread *td) { return (0); } static int acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct acpi_softc *sc; struct acpi_ioctl_hook *hp; int error, state; error = 0; hp = NULL; sc = dev->si_drv1; /* * Scan the list of registered ioctls, looking for handlers. */ ACPI_LOCK(acpi); if (acpi_ioctl_hooks_initted) TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) { if (hp->cmd == cmd) break; } ACPI_UNLOCK(acpi); if (hp) return (hp->fn(cmd, addr, hp->arg)); /* * Core ioctls are not permitted unless the caller has write access. * The other ioctls currently just fetch information and do not change * system behavior. */ if ((flag & FWRITE) == 0) return (EPERM); /* Core system ioctls.
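 *
 * (Illustrative aside: a driver wanting its own /dev/acpi command
 * registers a hook, e.g.
 *
 *	error = acpi_register_ioctl(ACPIIO_MYDEV_FOO, mydev_ioctl, sc);
 *
 * where ACPIIO_MYDEV_FOO and mydev_ioctl are hypothetical; such hooks
 * are matched by the scan above before we ever get here.)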
*/ switch (cmd) { case ACPIIO_REQSLPSTATE: state = *(int *)addr; if (state != ACPI_STATE_S5) return (acpi_ReqSleepState(sc, state)); device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n"); error = EOPNOTSUPP; break; case ACPIIO_ACKSLPSTATE: error = *(int *)addr; error = acpi_AckSleepState(sc->acpi_clone, error); break; case ACPIIO_SETSLPSTATE: /* DEPRECATED */ state = *(int *)addr; if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX) return (EINVAL); if (!acpi_sleep_states[state]) return (EOPNOTSUPP); if (ACPI_FAILURE(acpi_SetSleepState(sc, state))) error = ENXIO; break; default: error = ENXIO; break; } return (error); } static int acpi_sname2sstate(const char *sname) { int sstate; if (toupper(sname[0]) == 'S') { sstate = sname[1] - '0'; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 && sname[2] == '\0') return (sstate); } else if (strcasecmp(sname, "NONE") == 0) return (ACPI_STATE_UNKNOWN); return (-1); } static const char * acpi_sstate2sname(int sstate) { static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" }; if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5) return (snames[sstate]); else if (sstate == ACPI_STATE_UNKNOWN) return ("NONE"); return (NULL); } static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { int error; struct sbuf sb; UINT8 state; sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++) if (acpi_sleep_states[state]) sbuf_printf(&sb, "%s ", acpi_sstate2sname(state)); sbuf_trim(&sb); sbuf_finish(&sb); error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); sbuf_delete(&sb); return (error); } static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS) { char sleep_state[10]; int error, new_state, old_state; old_state = *(int *)oidp->oid_arg1; strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state)); error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req); if (error == 0 && req->newptr != NULL) { new_state = acpi_sname2sstate(sleep_state); if (new_state < ACPI_STATE_S1) return (EINVAL); if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state]) return (EOPNOTSUPP); if (new_state != old_state) *(int *)oidp->oid_arg1 = new_state; } return (error); } /* Inform devctl(4) when we receive a Notify. */ void acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify) { char notify_buf[16]; ACPI_BUFFER handle_buf; ACPI_STATUS status; if (subsystem == NULL) return; handle_buf.Pointer = NULL; handle_buf.Length = ACPI_ALLOCATE_BUFFER; status = AcpiNsHandleToPathname(h, &handle_buf, FALSE); if (ACPI_FAILURE(status)) return; snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify); devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf); AcpiOsFree(handle_buf.Pointer); } #ifdef ACPI_DEBUG /* * Support for parsing debug options from the kernel environment. * * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers * by specifying the names of the bits in the debug.acpi.layer and * debug.acpi.level environment variables. Bits may be unset by * prefixing the bit name with !. 
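 *
 * For example (illustrative settings that use names from the tables
 * below), in loader.conf(5):
 *
 *	debug.acpi.layer="ACPI_EC ACPI_BUS"
 *	debug.acpi.level="ACPI_LV_INIT !ACPI_LV_DEBUG_OBJECT"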
*/ struct debugtag { char *name; UINT32 value; }; static struct debugtag dbg_layer[] = { {"ACPI_UTILITIES", ACPI_UTILITIES}, {"ACPI_HARDWARE", ACPI_HARDWARE}, {"ACPI_EVENTS", ACPI_EVENTS}, {"ACPI_TABLES", ACPI_TABLES}, {"ACPI_NAMESPACE", ACPI_NAMESPACE}, {"ACPI_PARSER", ACPI_PARSER}, {"ACPI_DISPATCHER", ACPI_DISPATCHER}, {"ACPI_EXECUTER", ACPI_EXECUTER}, {"ACPI_RESOURCES", ACPI_RESOURCES}, {"ACPI_CA_DEBUGGER", ACPI_CA_DEBUGGER}, {"ACPI_OS_SERVICES", ACPI_OS_SERVICES}, {"ACPI_CA_DISASSEMBLER", ACPI_CA_DISASSEMBLER}, {"ACPI_ALL_COMPONENTS", ACPI_ALL_COMPONENTS}, {"ACPI_AC_ADAPTER", ACPI_AC_ADAPTER}, {"ACPI_BATTERY", ACPI_BATTERY}, {"ACPI_BUS", ACPI_BUS}, {"ACPI_BUTTON", ACPI_BUTTON}, {"ACPI_EC", ACPI_EC}, {"ACPI_FAN", ACPI_FAN}, {"ACPI_POWERRES", ACPI_POWERRES}, {"ACPI_PROCESSOR", ACPI_PROCESSOR}, {"ACPI_THERMAL", ACPI_THERMAL}, {"ACPI_TIMER", ACPI_TIMER}, {"ACPI_ALL_DRIVERS", ACPI_ALL_DRIVERS}, {NULL, 0} }; static struct debugtag dbg_level[] = { {"ACPI_LV_INIT", ACPI_LV_INIT}, {"ACPI_LV_DEBUG_OBJECT", ACPI_LV_DEBUG_OBJECT}, {"ACPI_LV_INFO", ACPI_LV_INFO}, {"ACPI_LV_REPAIR", ACPI_LV_REPAIR}, {"ACPI_LV_ALL_EXCEPTIONS", ACPI_LV_ALL_EXCEPTIONS}, /* Trace verbosity level 1 [Standard Trace Level] */ {"ACPI_LV_INIT_NAMES", ACPI_LV_INIT_NAMES}, {"ACPI_LV_PARSE", ACPI_LV_PARSE}, {"ACPI_LV_LOAD", ACPI_LV_LOAD}, {"ACPI_LV_DISPATCH", ACPI_LV_DISPATCH}, {"ACPI_LV_EXEC", ACPI_LV_EXEC}, {"ACPI_LV_NAMES", ACPI_LV_NAMES}, {"ACPI_LV_OPREGION", ACPI_LV_OPREGION}, {"ACPI_LV_BFIELD", ACPI_LV_BFIELD}, {"ACPI_LV_TABLES", ACPI_LV_TABLES}, {"ACPI_LV_VALUES", ACPI_LV_VALUES}, {"ACPI_LV_OBJECTS", ACPI_LV_OBJECTS}, {"ACPI_LV_RESOURCES", ACPI_LV_RESOURCES}, {"ACPI_LV_USER_REQUESTS", ACPI_LV_USER_REQUESTS}, {"ACPI_LV_PACKAGE", ACPI_LV_PACKAGE}, {"ACPI_LV_VERBOSITY1", ACPI_LV_VERBOSITY1}, /* Trace verbosity level 2 [Function tracing and memory allocation] */ {"ACPI_LV_ALLOCATIONS", ACPI_LV_ALLOCATIONS}, {"ACPI_LV_FUNCTIONS", ACPI_LV_FUNCTIONS}, {"ACPI_LV_OPTIMIZATIONS", ACPI_LV_OPTIMIZATIONS}, {"ACPI_LV_VERBOSITY2", ACPI_LV_VERBOSITY2}, {"ACPI_LV_ALL", ACPI_LV_ALL}, /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */ {"ACPI_LV_MUTEX", ACPI_LV_MUTEX}, {"ACPI_LV_THREADS", ACPI_LV_THREADS}, {"ACPI_LV_IO", ACPI_LV_IO}, {"ACPI_LV_INTERRUPTS", ACPI_LV_INTERRUPTS}, {"ACPI_LV_VERBOSITY3", ACPI_LV_VERBOSITY3}, /* Exceptionally verbose output -- also used in the global "DebugLevel" */ {"ACPI_LV_AML_DISASSEMBLE", ACPI_LV_AML_DISASSEMBLE}, {"ACPI_LV_VERBOSE_INFO", ACPI_LV_VERBOSE_INFO}, {"ACPI_LV_FULL_TABLES", ACPI_LV_FULL_TABLES}, {"ACPI_LV_EVENTS", ACPI_LV_EVENTS}, {"ACPI_LV_VERBOSE", ACPI_LV_VERBOSE}, {NULL, 0} }; static void acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag) { char *ep; int i, l; int set; while (*cp) { if (isspace(*cp)) { cp++; continue; } ep = cp; while (*ep && !isspace(*ep)) ep++; if (*cp == '!') { set = 0; cp++; if (cp == ep) continue; } else { set = 1; } l = ep - cp; for (i = 0; tag[i].name != NULL; i++) { if (!strncmp(cp, tag[i].name, l)) { if (set) *flag |= tag[i].value; else *flag &= ~tag[i].value; } } cp = ep; } } static void acpi_set_debugging(void *junk) { char *layer, *level; if (cold) { AcpiDbgLayer = 0; AcpiDbgLevel = 0; } layer = kern_getenv("debug.acpi.layer"); level = kern_getenv("debug.acpi.level"); if (layer == NULL && level == NULL) return; printf("ACPI set debug"); if (layer != NULL) { if (strcmp("NONE", layer) != 0) printf(" layer '%s'", layer); acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer); freeenv(layer); } if (level != NULL) { if (strcmp("NONE", level) != 0) 
printf(" level '%s'", level); acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel); freeenv(level); } printf("\n"); } SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging, NULL); static int acpi_debug_sysctl(SYSCTL_HANDLER_ARGS) { int error, *dbg; struct debugtag *tag; struct sbuf sb; char temp[128]; if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL) return (ENOMEM); if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) { tag = &dbg_layer[0]; dbg = &AcpiDbgLayer; } else { tag = &dbg_level[0]; dbg = &AcpiDbgLevel; } /* Get old values if this is a get request. */ ACPI_SERIAL_BEGIN(acpi); if (*dbg == 0) { sbuf_cpy(&sb, "NONE"); } else if (req->newptr == NULL) { for (; tag->name != NULL; tag++) { if ((*dbg & tag->value) == tag->value) sbuf_printf(&sb, "%s ", tag->name); } } sbuf_trim(&sb); sbuf_finish(&sb); strlcpy(temp, sbuf_data(&sb), sizeof(temp)); sbuf_delete(&sb); error = sysctl_handle_string(oidp, temp, sizeof(temp), req); /* Check for error or no change */ if (error == 0 && req->newptr != NULL) { *dbg = 0; kern_setenv((char *)oidp->oid_arg1, temp); acpi_set_debugging(NULL); } ACPI_SERIAL_END(acpi); return (error); } SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_NEEDGIANT, "debug.acpi.layer", 0, acpi_debug_sysctl, "A", ""); SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING | CTLFLAG_NEEDGIANT, "debug.acpi.level", 0, acpi_debug_sysctl, "A", ""); #endif /* ACPI_DEBUG */ static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS) { int error; int old; old = acpi_debug_objects; error = sysctl_handle_int(oidp, &acpi_debug_objects, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (old == acpi_debug_objects || (old && acpi_debug_objects)) return (0); ACPI_SERIAL_BEGIN(acpi); AcpiGbl_EnableAmlDebugObject = acpi_debug_objects ? 
TRUE : FALSE; ACPI_SERIAL_END(acpi); return (0); } static int acpi_parse_interfaces(char *str, struct acpi_interface *iface) { char *p; size_t len; int i, j; p = str; while (isspace(*p) || *p == ',') p++; len = strlen(p); if (len == 0) return (0); p = strdup(p, M_TEMP); for (i = 0; i < len; i++) if (p[i] == ',') p[i] = '\0'; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { i += strlen(p + i) + 1; j++; } if (j == 0) { free(p, M_TEMP); return (0); } iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK); iface->num = j; i = j = 0; while (i < len) if (isspace(p[i]) || p[i] == '\0') i++; else { iface->data[j] = p + i; i += strlen(p + i) + 1; j++; } return (j); } static void acpi_free_interfaces(struct acpi_interface *iface) { free(iface->data[0], M_TEMP); free(iface->data, M_TEMP); } static void acpi_reset_interfaces(device_t dev) { struct acpi_interface list; ACPI_STATUS status; int i; if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiInstallInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to install _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "installed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) { for (i = 0; i < list.num; i++) { status = AcpiRemoveInterface(list.data[i]); if (ACPI_FAILURE(status)) device_printf(dev, "failed to remove _OSI(\"%s\"): %s\n", list.data[i], AcpiFormatException(status)); else if (bootverbose) device_printf(dev, "removed _OSI(\"%s\")\n", list.data[i]); } acpi_free_interfaces(&list); } } static int acpi_pm_func(u_long cmd, void *arg, ...) { int state, acpi_state; int error; struct acpi_softc *sc; va_list ap; error = 0; switch (cmd) { case POWER_CMD_SUSPEND: sc = (struct acpi_softc *)arg; if (sc == NULL) { error = EINVAL; goto out; } va_start(ap, arg); state = va_arg(ap, int); va_end(ap); switch (state) { case POWER_SLEEP_STATE_STANDBY: acpi_state = sc->acpi_standby_sx; break; case POWER_SLEEP_STATE_SUSPEND: acpi_state = sc->acpi_suspend_sx; break; case POWER_SLEEP_STATE_HIBERNATE: acpi_state = ACPI_STATE_S4; break; default: error = EINVAL; goto out; } if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state))) error = ENXIO; break; default: error = EINVAL; goto out; } out: return (error); } static void acpi_pm_register(void *arg) { if (!cold || resource_disabled("acpi", 0)) return; power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL); } SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL); diff --git a/sys/dev/acpica/acpi_pci.c b/sys/dev/acpica/acpi_pci.c index c8d37268f466..90618f43bbef 100644 --- a/sys/dev/acpica/acpi_pci.c +++ b/sys/dev/acpica/acpi_pci.c @@ -1,485 +1,481 @@ /*- * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include "opt_iommu.h" #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" /* Hooks for the ACPI CA debugging infrastructure. */ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("PCI") struct acpi_pci_devinfo { struct pci_devinfo ap_dinfo; ACPI_HANDLE ap_handle; int ap_flags; }; ACPI_SERIAL_DECL(pci_powerstate, "ACPI PCI power methods"); /* Be sure that ACPI and PCI power states are equivalent. */ CTASSERT(ACPI_STATE_D0 == PCI_POWERSTATE_D0); CTASSERT(ACPI_STATE_D1 == PCI_POWERSTATE_D1); CTASSERT(ACPI_STATE_D2 == PCI_POWERSTATE_D2); CTASSERT(ACPI_STATE_D3 == PCI_POWERSTATE_D3); static struct pci_devinfo *acpi_pci_alloc_devinfo(device_t dev); static int acpi_pci_attach(device_t dev); static void acpi_pci_child_deleted(device_t dev, device_t child); -static int acpi_pci_child_location_str_method(device_t cbdev, - device_t child, char *buf, size_t buflen); +static int acpi_pci_child_location_method(device_t cbdev, + device_t child, struct sbuf *sb); static int acpi_pci_detach(device_t dev); static int acpi_pci_probe(device_t dev); static int acpi_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); static int acpi_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value); static ACPI_STATUS acpi_pci_save_handle(ACPI_HANDLE handle, UINT32 level, void *context, void **status); static int acpi_pci_set_powerstate_method(device_t dev, device_t child, int state); static void acpi_pci_update_device(ACPI_HANDLE handle, device_t pci_child); static bus_dma_tag_t acpi_pci_get_dma_tag(device_t bus, device_t child); static device_method_t acpi_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_pci_probe), DEVMETHOD(device_attach, acpi_pci_attach), DEVMETHOD(device_detach, acpi_pci_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, acpi_pci_read_ivar), DEVMETHOD(bus_write_ivar, acpi_pci_write_ivar), DEVMETHOD(bus_child_deleted, acpi_pci_child_deleted), - DEVMETHOD(bus_child_location_str, acpi_pci_child_location_str_method), + DEVMETHOD(bus_child_location, acpi_pci_child_location_method), DEVMETHOD(bus_get_cpus, acpi_get_cpus), DEVMETHOD(bus_get_dma_tag, acpi_pci_get_dma_tag), DEVMETHOD(bus_get_domain, acpi_get_domain), /* PCI interface */ DEVMETHOD(pci_alloc_devinfo, acpi_pci_alloc_devinfo), DEVMETHOD(pci_child_added, acpi_pci_child_added), DEVMETHOD(pci_set_powerstate, acpi_pci_set_powerstate_method), DEVMETHOD_END }; static devclass_t pci_devclass; DEFINE_CLASS_1(pci, acpi_pci_driver, acpi_pci_methods, sizeof(struct pci_softc), pci_driver); DRIVER_MODULE(acpi_pci, pcib, acpi_pci_driver, pci_devclass, 0, 0); MODULE_DEPEND(acpi_pci, 
acpi, 1, 1, 1); MODULE_DEPEND(acpi_pci, pci, 1, 1, 1); MODULE_VERSION(acpi_pci, 1); static struct pci_devinfo * acpi_pci_alloc_devinfo(device_t dev) { struct acpi_pci_devinfo *dinfo; dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO); return (&dinfo->ap_dinfo); } static int acpi_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct acpi_pci_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case ACPI_IVAR_HANDLE: *result = (uintptr_t)dinfo->ap_handle; return (0); case ACPI_IVAR_FLAGS: *result = (uintptr_t)dinfo->ap_flags; return (0); } return (pci_read_ivar(dev, child, which, result)); } static int acpi_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct acpi_pci_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case ACPI_IVAR_HANDLE: dinfo->ap_handle = (ACPI_HANDLE)value; return (0); case ACPI_IVAR_FLAGS: dinfo->ap_flags = (int)value; return (0); } return (pci_write_ivar(dev, child, which, value)); } static void acpi_pci_child_deleted(device_t dev, device_t child) { struct acpi_pci_devinfo *dinfo = device_get_ivars(child); if (acpi_get_device(dinfo->ap_handle) == child) AcpiDetachData(dinfo->ap_handle, acpi_fake_objhandler); pci_child_deleted(dev, child); } static int -acpi_pci_child_location_str_method(device_t cbdev, device_t child, char *buf, - size_t buflen) +acpi_pci_child_location_method(device_t cbdev, device_t child, struct sbuf *sb) { - struct acpi_pci_devinfo *dinfo = device_get_ivars(child); - int pxm; - char buf2[32]; - - pci_child_location_str_method(cbdev, child, buf, buflen); + struct acpi_pci_devinfo *dinfo = device_get_ivars(child); + int pxm; - if (dinfo->ap_handle) { - strlcat(buf, " handle=", buflen); - strlcat(buf, acpi_name(dinfo->ap_handle), buflen); + pci_child_location_method(cbdev, child, sb); - if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ap_handle, "_PXM", &pxm))) { - snprintf(buf2, 32, " _PXM=%d", pxm); - strlcat(buf, buf2, buflen); - } - } - return (0); + if (dinfo->ap_handle) { + sbuf_printf(sb, " handle=%s", acpi_name(dinfo->ap_handle)); + if (ACPI_SUCCESS(acpi_GetInteger(dinfo->ap_handle, "_PXM", &pxm))) { + sbuf_printf(sb, " _PXM=%d", pxm); + } + } + return (0); } /* * PCI power manangement */ static int acpi_pci_set_powerstate_method(device_t dev, device_t child, int state) { ACPI_HANDLE h; ACPI_STATUS status; int old_state, error; error = 0; if (state < ACPI_STATE_D0 || state > ACPI_STATE_D3) return (EINVAL); /* * We set the state using PCI Power Management outside of setting * the ACPI state. This means that when powering down a device, we * first shut it down using PCI, and then using ACPI, which lets ACPI * try to power down any Power Resources that are now no longer used. * When powering up a device, we let ACPI set the state first so that * it can enable any needed Power Resources before changing the PCI * power state. 
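 *
 * As a sketch, the ordering implemented below is:
 *
 *	suspend (D0 -> D3):  pci_set_powerstate_method(dev, child, state);
 *	                     acpi_pwr_switch_consumer(h, state);
 *	resume  (D3 -> D0):  acpi_pwr_switch_consumer(h, state);
 *	                     pci_set_powerstate_method(dev, child, state);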
*/ ACPI_SERIAL_BEGIN(pci_powerstate); old_state = pci_get_powerstate(child); if (old_state < state && pci_do_power_suspend) { error = pci_set_powerstate_method(dev, child, state); if (error) goto out; } h = acpi_get_handle(child); status = acpi_pwr_switch_consumer(h, state); if (ACPI_SUCCESS(status)) { if (bootverbose) device_printf(dev, "set ACPI power state D%d on %s\n", state, acpi_name(h)); } else if (status != AE_NOT_FOUND) device_printf(dev, "failed to set ACPI power state D%d on %s: %s\n", state, acpi_name(h), AcpiFormatException(status)); if (old_state > state && pci_do_power_resume) error = pci_set_powerstate_method(dev, child, state); out: ACPI_SERIAL_END(pci_powerstate); return (error); } static void acpi_pci_update_device(ACPI_HANDLE handle, device_t pci_child) { ACPI_STATUS status; device_t child; /* * Occasionally a PCI device may show up as an ACPI device * with a _HID. (For example, the TabletPC TC1000 has a * second PCI-ISA bridge that has a _HID for an * acpi_sysresource device.) In that case, leave ACPI-CA's * device data pointing at the ACPI-enumerated device. */ child = acpi_get_device(handle); if (child != NULL) { KASSERT(device_get_parent(child) == devclass_get_device(devclass_find("acpi"), 0), ("%s: child (%s)'s parent is not acpi0", __func__, acpi_name(handle))); return; } /* * Update ACPI-CA to use the PCI enumerated device_t for this handle. */ status = AcpiAttachData(handle, acpi_fake_objhandler, pci_child); if (ACPI_FAILURE(status)) printf("WARNING: Unable to attach object data to %s - %s\n", acpi_name(handle), AcpiFormatException(status)); } static ACPI_STATUS acpi_pci_save_handle(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { struct acpi_pci_devinfo *dinfo; device_t child; int func, slot; UINT32 address; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); child = context; if (ACPI_FAILURE(acpi_GetInteger(handle, "_ADR", &address))) return_ACPI_STATUS (AE_OK); slot = ACPI_ADR_PCI_SLOT(address); func = ACPI_ADR_PCI_FUNC(address); dinfo = device_get_ivars(child); if (dinfo->ap_dinfo.cfg.func == func && dinfo->ap_dinfo.cfg.slot == slot) { dinfo->ap_handle = handle; acpi_pci_update_device(handle, child); return_ACPI_STATUS (AE_CTRL_TERMINATE); } return_ACPI_STATUS (AE_OK); } void acpi_pci_child_added(device_t dev, device_t child) { /* * PCI devices are added via the bus scan in the normal PCI * bus driver. As each device is added, the * acpi_pci_child_added() callback walks the ACPI namespace * under the bridge driver to save ACPI handles to all the * devices that appear in the ACPI namespace as immediate * descendants of the bridge. * * XXX: Sometimes PCI devices show up in the ACPI namespace that * pci_add_children() doesn't find. We currently just ignore * these devices. 
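 *
 * Matching in acpi_pci_save_handle() is done by the slot and
 * function decoded from each node's _ADR value; nodes without an
 * _ADR are skipped, and the walk stops (AE_CTRL_TERMINATE) once
 * the child has been matched.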
*/ AcpiWalkNamespace(ACPI_TYPE_DEVICE, acpi_get_handle(dev), 1, acpi_pci_save_handle, NULL, child, NULL); } static int acpi_pci_probe(device_t dev) { if (acpi_get_handle(dev) == NULL) return (ENXIO); device_set_desc(dev, "ACPI PCI bus"); return (BUS_PROBE_DEFAULT); } static void acpi_pci_bus_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context) { device_t dev; dev = context; switch (notify) { case ACPI_NOTIFY_BUS_CHECK: mtx_lock(&Giant); BUS_RESCAN(dev); mtx_unlock(&Giant); break; default: device_printf(dev, "unknown notify %#x\n", notify); break; } } static void acpi_pci_device_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context) { device_t child, dev; ACPI_STATUS status; int error; dev = context; switch (notify) { case ACPI_NOTIFY_DEVICE_CHECK: mtx_lock(&Giant); BUS_RESCAN(dev); mtx_unlock(&Giant); break; case ACPI_NOTIFY_EJECT_REQUEST: child = acpi_get_device(h); if (child == NULL) { device_printf(dev, "no device to eject for %s\n", acpi_name(h)); return; } mtx_lock(&Giant); error = device_detach(child); if (error) { mtx_unlock(&Giant); device_printf(dev, "failed to detach %s: %d\n", device_get_nameunit(child), error); return; } status = acpi_SetInteger(h, "_EJ0", 1); if (ACPI_FAILURE(status)) { mtx_unlock(&Giant); device_printf(dev, "failed to eject %s: %s\n", acpi_name(h), AcpiFormatException(status)); return; } BUS_RESCAN(dev); mtx_unlock(&Giant); break; default: device_printf(dev, "unknown notify %#x for %s\n", notify, acpi_name(h)); break; } } static ACPI_STATUS acpi_pci_install_device_notify_handler(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_HANDLE h; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiGetHandle(handle, "_EJ0", &h))) return_ACPI_STATUS (AE_OK); AcpiInstallNotifyHandler(handle, ACPI_SYSTEM_NOTIFY, acpi_pci_device_notify_handler, context); return_ACPI_STATUS (AE_OK); } static int acpi_pci_attach(device_t dev) { int error; error = pci_attach(dev); if (error) return (error); AcpiInstallNotifyHandler(acpi_get_handle(dev), ACPI_SYSTEM_NOTIFY, acpi_pci_bus_notify_handler, dev); AcpiWalkNamespace(ACPI_TYPE_DEVICE, acpi_get_handle(dev), 1, acpi_pci_install_device_notify_handler, NULL, dev, NULL); return (0); } static ACPI_STATUS acpi_pci_remove_notify_handler(ACPI_HANDLE handle, UINT32 level, void *context, void **status) { ACPI_HANDLE h; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); if (ACPI_FAILURE(AcpiGetHandle(handle, "_EJ0", &h))) return_ACPI_STATUS (AE_OK); AcpiRemoveNotifyHandler(handle, ACPI_SYSTEM_NOTIFY, acpi_pci_device_notify_handler); return_ACPI_STATUS (AE_OK); } static int acpi_pci_detach(device_t dev) { AcpiWalkNamespace(ACPI_TYPE_DEVICE, acpi_get_handle(dev), 1, acpi_pci_remove_notify_handler, NULL, dev, NULL); AcpiRemoveNotifyHandler(acpi_get_handle(dev), ACPI_SYSTEM_NOTIFY, acpi_pci_bus_notify_handler); return (pci_detach(dev)); } #ifdef IOMMU static bus_dma_tag_t acpi_pci_get_dma_tag(device_t bus, device_t child) { bus_dma_tag_t tag; if (device_get_parent(child) == bus) { /* try iommu and return if it works */ tag = iommu_get_dma_tag(bus, child); } else tag = NULL; if (tag == NULL) tag = pci_get_dma_tag(bus, child); return (tag); } #else static bus_dma_tag_t acpi_pci_get_dma_tag(device_t bus, device_t child) { return (pci_get_dma_tag(bus, child)); } #endif diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h index fb196c2f4713..980a004e2c35 100644 --- a/sys/dev/acpica/acpivar.h +++ b/sys/dev/acpica/acpivar.h @@ -1,583 +1,583 @@ /*- * Copyright (c) 2000 Mitsuru IWASAKI * Copyright 
(c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _ACPIVAR_H_ #define _ACPIVAR_H_ #ifdef _KERNEL #include "acpi_if.h" #include "bus_if.h" #include #ifdef INTRNG #include #endif #include #include #include #include #include #include #include #include struct apm_clone_data; struct acpi_softc { device_t acpi_dev; struct cdev *acpi_dev_t; int acpi_enabled; int acpi_sstate; int acpi_sleep_disabled; int acpi_resources_reserved; struct sysctl_ctx_list acpi_sysctl_ctx; struct sysctl_oid *acpi_sysctl_tree; int acpi_power_button_sx; int acpi_sleep_button_sx; int acpi_lid_switch_sx; int acpi_standby_sx; int acpi_suspend_sx; int acpi_sleep_delay; int acpi_s4bios; int acpi_do_disable; int acpi_verbose; int acpi_handle_reboot; vm_offset_t acpi_wakeaddr; vm_paddr_t acpi_wakephys; int acpi_next_sstate; /* Next suspend Sx state. */ struct apm_clone_data *acpi_clone; /* Pseudo-dev for devd(8). */ STAILQ_HEAD(,apm_clone_data) apm_cdevs; /* All apm/apmctl/acpi cdevs. */ struct callout susp_force_to; /* Force suspend if no acks. */ }; struct acpi_device { /* ACPI ivars */ ACPI_HANDLE ad_handle; void *ad_private; int ad_flags; int ad_cls_class; /* Resources */ struct resource_list ad_rl; }; #ifdef INTRNG struct intr_map_data_acpi { struct intr_map_data hdr; u_int irq; u_int pol; u_int trig; }; #endif /* Track device (/dev/{apm,apmctl} and /dev/acpi) notification status. */ struct apm_clone_data { STAILQ_ENTRY(apm_clone_data) entries; struct cdev *cdev; int flags; #define ACPI_EVF_NONE 0 /* /dev/apm semantics */ #define ACPI_EVF_DEVD 1 /* /dev/acpi is handled via devd(8) */ #define ACPI_EVF_WRITE 2 /* Device instance is opened writable. */ int notify_status; #define APM_EV_NONE 0 /* Device not yet aware of pending sleep. */ #define APM_EV_NOTIFIED 1 /* Device saw next sleep state. */ #define APM_EV_ACKED 2 /* Device agreed sleep can occur. */ struct acpi_softc *acpi_sc; struct selinfo sel_read; }; #define ACPI_PRW_MAX_POWERRES 8 struct acpi_prw_data { ACPI_HANDLE gpe_handle; int gpe_bit; int lowest_wake; ACPI_OBJECT power_res[ACPI_PRW_MAX_POWERRES]; int power_res_count; }; /* Flags for each device defined in the AML namespace. */ #define ACPI_FLAG_WAKE_ENABLED 0x1 /* Macros for extracting parts of a PCI address from an _ADR value. 
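 *
 * For example, an _ADR value of 0x001c0002 decodes to PCI slot 0x1c,
 * function 2.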
*/ #define ACPI_ADR_PCI_SLOT(adr) (((adr) & 0xffff0000) >> 16) #define ACPI_ADR_PCI_FUNC(adr) ((adr) & 0xffff) /* * Entry points to ACPI from above are global functions defined in this * file, sysctls, and I/O on the control device. Entry points from below * are interrupts (the SCI), notifies, task queue threads, and the thermal * zone polling thread. * * ACPI tables and global shared data are protected by a global lock * (acpi_mutex). * * Each ACPI device can have its own driver-specific mutex for protecting * shared access to local data. The ACPI_LOCK macros handle mutexes. * * Drivers that need to serialize access to functions (e.g., to route * interrupts, get/set control paths, etc.) should use the sx lock macros * (ACPI_SERIAL). * * ACPI-CA handles its own locking and should not be called with locks held. * * The most complicated path is: * GPE -> EC runs _Qxx -> _Qxx reads EC space -> GPE */ extern struct mtx acpi_mutex; #define ACPI_LOCK(sys) mtx_lock(&sys##_mutex) #define ACPI_UNLOCK(sys) mtx_unlock(&sys##_mutex) #define ACPI_LOCK_ASSERT(sys) mtx_assert(&sys##_mutex, MA_OWNED); #define ACPI_LOCK_DECL(sys, name) \ static struct mtx sys##_mutex; \ MTX_SYSINIT(sys##_mutex, &sys##_mutex, name, MTX_DEF) #define ACPI_SERIAL_BEGIN(sys) sx_xlock(&sys##_sxlock) #define ACPI_SERIAL_END(sys) sx_xunlock(&sys##_sxlock) #define ACPI_SERIAL_ASSERT(sys) sx_assert(&sys##_sxlock, SX_XLOCKED); #define ACPI_SERIAL_DECL(sys, name) \ static struct sx sys##_sxlock; \ SX_SYSINIT(sys##_sxlock, &sys##_sxlock, name) /* * ACPI CA does not define layers for non-ACPI CA drivers. * We define some here within the range provided. */ #define ACPI_AC_ADAPTER 0x00010000 #define ACPI_BATTERY 0x00020000 #define ACPI_BUS 0x00040000 #define ACPI_BUTTON 0x00080000 #define ACPI_EC 0x00100000 #define ACPI_FAN 0x00200000 #define ACPI_POWERRES 0x00400000 #define ACPI_PROCESSOR 0x00800000 #define ACPI_THERMAL 0x01000000 #define ACPI_TIMER 0x02000000 #define ACPI_OEM 0x04000000 /* * Constants for different interrupt models used with acpi_SetIntrModel(). */ #define ACPI_INTR_PIC 0 #define ACPI_INTR_APIC 1 #define ACPI_INTR_SAPIC 2 /* * Various features and capabilities for the acpi_get_features() method. * In particular, these are used for the ACPI 3.0 _PDC and _OSC methods. * See the Intel document titled "Intel Processor Vendor-Specific ACPI", * number 302223-007. */ #define ACPI_CAP_PERF_MSRS (1 << 0) /* Intel SpeedStep PERF_CTL MSRs */ #define ACPI_CAP_C1_IO_HALT (1 << 1) /* Intel C1 "IO then halt" sequence */ #define ACPI_CAP_THR_MSRS (1 << 2) /* Intel OnDemand throttling MSRs */ #define ACPI_CAP_SMP_SAME (1 << 3) /* MP C1, Px, and Tx (all the same) */ #define ACPI_CAP_SMP_SAME_C3 (1 << 4) /* MP C2 and C3 (all the same) */ #define ACPI_CAP_SMP_DIFF_PX (1 << 5) /* MP Px (different, using _PSD) */ #define ACPI_CAP_SMP_DIFF_CX (1 << 6) /* MP Cx (different, using _CSD) */ #define ACPI_CAP_SMP_DIFF_TX (1 << 7) /* MP Tx (different, using _TSD) */ #define ACPI_CAP_SMP_C1_NATIVE (1 << 8) /* MP C1 support other than halt */ #define ACPI_CAP_SMP_C3_NATIVE (1 << 9) /* MP C2 and C3 support */ #define ACPI_CAP_PX_HW_COORD (1 << 11) /* Intel P-state HW coordination */ #define ACPI_CAP_INTR_CPPC (1 << 12) /* Native Interrupt Handling for Collaborative Processor Performance Control notifications */ #define ACPI_CAP_HW_DUTY_C (1 << 13) /* Hardware Duty Cycling */ /* * Quirk flags. * * ACPI_Q_BROKEN: Disables all ACPI support. * ACPI_Q_TIMER: Disables support for the ACPI timer. 
* ACPI_Q_MADT_IRQ0: Specifies that ISA IRQ 0 is wired up to pin 0 of the * first APIC and that the MADT should force that by ignoring the PC-AT * compatible flag and ignoring overrides that redirect IRQ 0 to pin 2. */ extern int acpi_quirks; #define ACPI_Q_OK 0 #define ACPI_Q_BROKEN (1 << 0) #define ACPI_Q_TIMER (1 << 1) #define ACPI_Q_MADT_IRQ0 (1 << 2) /* * Plug and play information for device matching. Matching table format * is compatible with ids parameter of ACPI_ID_PROBE bus method. * * XXX: While ACPI_ID_PROBE matches against _HID and all _CIDs, current - * acpi_pnpinfo_str() exports only _HID and first _CID. That means second - * and further _CIDs should be added to both acpi_pnpinfo_str() and + * acpi_pnpinfo() exports only _HID and first _CID. That means second + * and further _CIDs should be added to both acpi_pnpinfo() and * ACPICOMPAT_PNP_INFO if device matching against them is required. */ #define ACPICOMPAT_PNP_INFO(t, busname) \ MODULE_PNP_INFO("Z:_HID", busname, t##hid, t, nitems(t)-1); \ MODULE_PNP_INFO("Z:_CID", busname, t##cid, t, nitems(t)-1); #define ACPI_PNP_INFO(t) ACPICOMPAT_PNP_INFO(t, acpi) /* * Note that the low ivar values are reserved to provide * interface compatibility with ISA drivers which can also * attach to ACPI. */ #define ACPI_IVAR_HANDLE 0x100 #define ACPI_IVAR_UNUSED 0x101 /* Unused/reserved. */ #define ACPI_IVAR_PRIVATE 0x102 #define ACPI_IVAR_FLAGS 0x103 /* * Accessor functions for our ivars. Default value for BUS_READ_IVAR is * (type) 0. The accessor functions don't check return values. */ #define __ACPI_BUS_ACCESSOR(varp, var, ivarp, ivar, type) \ \ static __inline type varp ## _get_ ## var(device_t dev) \ { \ uintptr_t v = 0; \ BUS_READ_IVAR(device_get_parent(dev), dev, \ ivarp ## _IVAR_ ## ivar, &v); \ return ((type) v); \ } \ \ static __inline void varp ## _set_ ## var(device_t dev, type t) \ { \ uintptr_t v = (uintptr_t) t; \ BUS_WRITE_IVAR(device_get_parent(dev), dev, \ ivarp ## _IVAR_ ## ivar, v); \ } __ACPI_BUS_ACCESSOR(acpi, handle, ACPI, HANDLE, ACPI_HANDLE) __ACPI_BUS_ACCESSOR(acpi, private, ACPI, PRIVATE, void *) __ACPI_BUS_ACCESSOR(acpi, flags, ACPI, FLAGS, int) void acpi_fake_objhandler(ACPI_HANDLE h, void *data); static __inline device_t acpi_get_device(ACPI_HANDLE handle) { void *dev = NULL; AcpiGetData(handle, acpi_fake_objhandler, &dev); return ((device_t)dev); } static __inline ACPI_OBJECT_TYPE acpi_get_type(device_t dev) { ACPI_HANDLE h; ACPI_OBJECT_TYPE t; if ((h = acpi_get_handle(dev)) == NULL) return (ACPI_TYPE_NOT_FOUND); if (ACPI_FAILURE(AcpiGetType(h, &t))) return (ACPI_TYPE_NOT_FOUND); return (t); } /* Find the difference between two PM tick counts. */ static __inline uint32_t acpi_TimerDelta(uint32_t end, uint32_t start) { if (end < start && (AcpiGbl_FADT.Flags & ACPI_FADT_32BIT_TIMER) == 0) end |= 0x01000000; return (end - start); } #ifdef ACPI_DEBUGGER void acpi_EnterDebugger(void); #endif #ifdef ACPI_DEBUG #include #define STEP(x) do {printf x, printf("\n"); cngetc();} while (0) #else #define STEP(x) #endif #define ACPI_VPRINT(dev, acpi_sc, x...) do { \ if (acpi_get_verbose(acpi_sc)) \ device_printf(dev, x); \ } while (0) /* Values for the first status word returned by _OSC. 
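 *
 * For example, a host-bridge driver negotiating features through _OSC
 * might test ACPI_OSC_CAPS_MASKED in the returned status word to see
 * whether the platform granted fewer capabilities than were requested
 * (a sketch; see acpi_EvaluateOSC() for the calling convention).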
*/ #define ACPI_OSC_FAILURE (1 << 1) #define ACPI_OSC_BAD_UUID (1 << 2) #define ACPI_OSC_BAD_REVISION (1 << 3) #define ACPI_OSC_CAPS_MASKED (1 << 4) #define ACPI_DEVINFO_PRESENT(x, flags) \ (((x) & (flags)) == (flags)) #define ACPI_DEVICE_PRESENT(x) \ ACPI_DEVINFO_PRESENT(x, ACPI_STA_DEVICE_PRESENT | \ ACPI_STA_DEVICE_FUNCTIONING) #define ACPI_BATTERY_PRESENT(x) \ ACPI_DEVINFO_PRESENT(x, ACPI_STA_DEVICE_PRESENT | \ ACPI_STA_DEVICE_FUNCTIONING | ACPI_STA_BATTERY_PRESENT) /* Callback function type for walking subtables within a table. */ typedef void acpi_subtable_handler(ACPI_SUBTABLE_HEADER *, void *); BOOLEAN acpi_DeviceIsPresent(device_t dev); BOOLEAN acpi_BatteryIsPresent(device_t dev); ACPI_STATUS acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result); ACPI_BUFFER *acpi_AllocBuffer(int size); ACPI_STATUS acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number); ACPI_STATUS acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number); ACPI_STATUS acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number); ACPI_STATUS acpi_ForeachPackageObject(ACPI_OBJECT *obj, void (*func)(ACPI_OBJECT *comp, void *arg), void *arg); ACPI_STATUS acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp); ACPI_STATUS acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res); UINT64 acpi_DSMQuery(ACPI_HANDLE h, const uint8_t *uuid, int revision); ACPI_STATUS acpi_EvaluateDSM(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf); ACPI_STATUS acpi_EvaluateDSMTyped(ACPI_HANDLE handle, const uint8_t *uuid, int revision, UINT64 function, ACPI_OBJECT *package, ACPI_BUFFER *out_buf, ACPI_OBJECT_TYPE type); ACPI_STATUS acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid, int revision, int count, uint32_t *caps_in, uint32_t *caps_out, bool query); ACPI_STATUS acpi_OverrideInterruptLevel(UINT32 InterruptNumber); ACPI_STATUS acpi_SetIntrModel(int model); int acpi_ReqSleepState(struct acpi_softc *sc, int state); int acpi_AckSleepState(struct apm_clone_data *clone, int error); ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state); int acpi_wake_set_enable(device_t dev, int enable); int acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw); ACPI_STATUS acpi_Startup(void); void acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify); int acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas, struct resource **res, u_int flags); void acpi_walk_subtables(void *first, void *end, acpi_subtable_handler *handler, void *arg); BOOLEAN acpi_has_hid(ACPI_HANDLE handle); int acpi_MatchHid(ACPI_HANDLE h, const char *hid); #define ACPI_MATCHHID_NOMATCH 0 #define ACPI_MATCHHID_HID 1 #define ACPI_MATCHHID_CID 2 struct acpi_parse_resource_set { void (*set_init)(device_t dev, void *arg, void **context); void (*set_done)(device_t dev, void *context); void (*set_ioport)(device_t dev, void *context, uint64_t base, uint64_t length); void (*set_iorange)(device_t dev, void *context, uint64_t low, uint64_t high, uint64_t length, uint64_t align); void (*set_memory)(device_t dev, void *context, uint64_t base, uint64_t length); void (*set_memoryrange)(device_t dev, void *context, uint64_t low, uint64_t high, uint64_t length, uint64_t align); void (*set_irq)(device_t dev, void *context, uint8_t *irq, int count, int trig, int pol); void (*set_ext_irq)(device_t dev, void *context, uint32_t *irq, int count, int trig, int pol); void (*set_drq)(device_t dev, void *context, uint8_t *drq, 
int count); void (*set_start_dependent)(device_t dev, void *context, int preference); void (*set_end_dependent)(device_t dev, void *context); }; extern struct acpi_parse_resource_set acpi_res_parse_set; int acpi_identify(void); void acpi_config_intr(device_t dev, ACPI_RESOURCE *res); #ifdef INTRNG int acpi_map_intr(device_t dev, u_int irq, ACPI_HANDLE handle); #endif ACPI_STATUS acpi_lookup_irq_resource(device_t dev, int rid, struct resource *res, ACPI_RESOURCE *acpi_res); ACPI_STATUS acpi_parse_resources(device_t dev, ACPI_HANDLE handle, struct acpi_parse_resource_set *set, void *arg); struct resource *acpi_alloc_sysres(device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); /* ACPI event handling */ UINT32 acpi_event_power_button_sleep(void *context); UINT32 acpi_event_power_button_wake(void *context); UINT32 acpi_event_sleep_button_sleep(void *context); UINT32 acpi_event_sleep_button_wake(void *context); #define ACPI_EVENT_PRI_FIRST 0 #define ACPI_EVENT_PRI_DEFAULT 10000 #define ACPI_EVENT_PRI_LAST 20000 typedef void (*acpi_event_handler_t)(void *, int); EVENTHANDLER_DECLARE(acpi_sleep_event, acpi_event_handler_t); EVENTHANDLER_DECLARE(acpi_wakeup_event, acpi_event_handler_t); EVENTHANDLER_DECLARE(acpi_acad_event, acpi_event_handler_t); EVENTHANDLER_DECLARE(acpi_video_event, acpi_event_handler_t); /* Device power control. */ ACPI_STATUS acpi_pwr_wake_enable(ACPI_HANDLE consumer, int enable); ACPI_STATUS acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state); int acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate); int acpi_set_powerstate(device_t child, int state); /* APM emulation */ void acpi_apm_init(struct acpi_softc *); /* Misc. */ static __inline struct acpi_softc * acpi_device_get_parent_softc(device_t child) { device_t parent; parent = device_get_parent(child); if (parent == NULL) return (NULL); return (device_get_softc(parent)); } static __inline int acpi_get_verbose(struct acpi_softc *sc) { if (sc) return (sc->acpi_verbose); return (0); } char *acpi_name(ACPI_HANDLE handle); int acpi_avoid(ACPI_HANDLE handle); int acpi_disabled(char *subsys); int acpi_machdep_init(device_t dev); void acpi_install_wakeup_handler(struct acpi_softc *sc); int acpi_sleep_machdep(struct acpi_softc *sc, int state); int acpi_wakeup_machdep(struct acpi_softc *sc, int state, int sleep_result, int intr_enabled); int acpi_table_quirks(int *quirks); int acpi_machdep_quirks(int *quirks); -int acpi_pnpinfo_str(ACPI_HANDLE handle, char *buf, size_t buflen); +int acpi_pnpinfo(ACPI_HANDLE handle, struct sbuf *sb); uint32_t hpet_get_uid(device_t dev); /* Battery Abstraction. */ struct acpi_battinfo; int acpi_battery_register(device_t dev); int acpi_battery_remove(device_t dev); int acpi_battery_get_units(void); int acpi_battery_get_info_expire(void); int acpi_battery_bst_valid(struct acpi_bst *bst); int acpi_battery_bix_valid(struct acpi_bix *bix); int acpi_battery_get_battinfo(device_t dev, struct acpi_battinfo *info); /* Embedded controller. */ void acpi_ec_ecdt_probe(device_t); /* AC adapter interface. */ int acpi_acad_get_acline(int *); /* Package manipulation convenience functions. 
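 *
 * Typical use validates an evaluated package before extracting its
 * fields, e.g. (a sketch, with pkg/val/val2 being the caller's
 * variables):
 *
 *	if (ACPI_PKG_VALID(pkg, 2) &&
 *	    acpi_PkgInt32(pkg, 0, &val) == 0 &&
 *	    acpi_PkgInt32(pkg, 1, &val2) == 0) {
 *		(use val and val2)
 *	}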
*/ #define ACPI_PKG_VALID(pkg, size) \ ((pkg) != NULL && (pkg)->Type == ACPI_TYPE_PACKAGE && \ (pkg)->Package.Count >= (size)) #define ACPI_PKG_VALID_EQ(pkg, size) \ ((pkg) != NULL && (pkg)->Type == ACPI_TYPE_PACKAGE && \ (pkg)->Package.Count == (size)) int acpi_PkgInt(ACPI_OBJECT *res, int idx, UINT64 *dst); int acpi_PkgInt32(ACPI_OBJECT *res, int idx, uint32_t *dst); int acpi_PkgInt16(ACPI_OBJECT *res, int idx, uint16_t *dst); int acpi_PkgStr(ACPI_OBJECT *res, int idx, void *dst, size_t size); int acpi_PkgGas(device_t dev, ACPI_OBJECT *res, int idx, int *type, int *rid, struct resource **dst, u_int flags); int acpi_PkgFFH_IntelCpu(ACPI_OBJECT *res, int idx, int *vendor, int *class, uint64_t *address, int *accsize); ACPI_HANDLE acpi_GetReference(ACPI_HANDLE scope, ACPI_OBJECT *obj); /* * Base level for BUS_ADD_CHILD. Special devices are added at orders less * than this, and normal devices at or above this level. This keeps the * probe order sorted so that things like sysresource are available before * their children need them. */ #define ACPI_DEV_BASE_ORDER 100 /* Default maximum number of tasks to enqueue. */ #ifndef ACPI_MAX_TASKS #define ACPI_MAX_TASKS MAX(32, MAXCPU * 4) #endif /* Default number of task queue threads to start. */ #ifndef ACPI_MAX_THREADS #define ACPI_MAX_THREADS 3 #endif /* Use the device logging level for ktr(4). */ #define KTR_ACPI KTR_DEV SYSCTL_DECL(_debug_acpi); /* * Parse and use proximity information in SRAT and SLIT. */ int acpi_pxm_init(int ncpus, vm_paddr_t maxphys); void acpi_pxm_parse_tables(void); void acpi_pxm_set_mem_locality(void); void acpi_pxm_set_cpu_locality(void); int acpi_pxm_get_cpu_locality(int apic_id); /* * Map a PXM to a VM domain. * * Returns the VM domain ID if found, or -1 if not found / invalid. */ int acpi_map_pxm_to_vm_domainid(int pxm); int acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset); int acpi_get_domain(device_t dev, device_t child, int *domain); #ifdef __aarch64__ /* * ARM specific ACPI interfaces, relating to IORT table. */ int acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid); int acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, u_int *xref, u_int *devid); int acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm); #endif #endif /* _KERNEL */ #endif /* !_ACPIVAR_H_ */ diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c index 8991d9c23fbc..47a5ef455f82 100644 --- a/sys/dev/ahci/ahci.c +++ b/sys/dev/ahci/ahci.c @@ -1,2906 +1,2906 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include "ahci.h" #include #include #include #include #include /* local prototypes */ static void ahci_intr(void *data); static void ahci_intr_one(void *data); static void ahci_intr_one_edge(void *data); static int ahci_ch_init(device_t dev); static int ahci_ch_deinit(device_t dev); static int ahci_ch_suspend(device_t dev); static int ahci_ch_resume(device_t dev); static void ahci_ch_pm(void *arg); static void ahci_ch_intr(void *arg); static void ahci_ch_intr_direct(void *arg); static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus); static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb); static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void ahci_execute_transaction(struct ahci_slot *slot); static void ahci_timeout(void *arg); static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et); static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag); static void ahci_dmainit(device_t dev); static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void ahci_dmafini(device_t dev); static void ahci_slotsalloc(device_t dev); static void ahci_slotsfree(device_t dev); static void ahci_reset(struct ahci_channel *ch); static void ahci_start(struct ahci_channel *ch, int fbs); static void ahci_stop(struct ahci_channel *ch); static void ahci_clo(struct ahci_channel *ch); static void ahci_start_fr(struct ahci_channel *ch); static void ahci_stop_fr(struct ahci_channel *ch); static int ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr); static uint32_t ahci_ch_detval(struct ahci_channel *ch, uint32_t val); static int ahci_sata_connect(struct ahci_channel *ch); static int ahci_sata_phy_reset(struct ahci_channel *ch); static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0); static void ahci_issue_recovery(struct ahci_channel *ch); static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb); static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb); static void ahciaction(struct cam_sim *sim, union ccb *ccb); static void ahcipoll(struct cam_sim *sim); static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers"); #define recovery_type spriv_field0 #define RECOVERY_NONE 0 #define RECOVERY_READ_LOG 1 #define RECOVERY_REQUEST_SENSE 2 #define recovery_slot spriv_field1 static uint32_t ahci_ch_detval(struct ahci_channel *ch, uint32_t val) { return ch->disablephy ? 
ATA_SC_DET_DISABLE : val; } int ahci_ctlr_setup(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); /* Clear interrupts */ ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS)); /* Configure CCC */ if (ctlr->ccc) { ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI)); ATA_OUTL(ctlr->r_mem, AHCI_CCCC, (ctlr->ccc << AHCI_CCCC_TV_SHIFT) | (4 << AHCI_CCCC_CC_SHIFT) | AHCI_CCCC_EN); ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) & AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT; if (bootverbose) { device_printf(dev, "CCC with %dms/4cmd enabled on vector %d\n", ctlr->ccc, ctlr->cccv); } } /* Enable AHCI interrupts */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE); return (0); } int ahci_ctlr_reset(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); uint32_t v; int timeout; /* BIOS/OS Handoff */ if ((ATA_INL(ctlr->r_mem, AHCI_VS) >= 0x00010200) && (ATA_INL(ctlr->r_mem, AHCI_CAP2) & AHCI_CAP2_BOH) && ((v = ATA_INL(ctlr->r_mem, AHCI_BOHC)) & AHCI_BOHC_OOS) == 0) { /* Request OS ownership. */ ATA_OUTL(ctlr->r_mem, AHCI_BOHC, v | AHCI_BOHC_OOS); /* Wait up to 2s for BIOS ownership release. */ for (timeout = 0; timeout < 80; timeout++) { DELAY(25000); v = ATA_INL(ctlr->r_mem, AHCI_BOHC); if ((v & AHCI_BOHC_BOS) == 0) break; if ((v & AHCI_BOHC_BB) == 0) break; } } /* Enable AHCI mode */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); /* Reset AHCI controller */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR); for (timeout = 1000; timeout > 0; timeout--) { DELAY(1000); if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0) break; } if (timeout == 0) { device_printf(dev, "AHCI controller reset failure\n"); return (ENXIO); } /* Reenable AHCI mode */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); if (ctlr->quirks & AHCI_Q_RESTORE_CAP) { /* * Restore capability field. * This is write to a read-only register to restore its state. * On fully standard-compliant hardware this is not needed and * this operation shall not take place. See ahci_pci.c for * platforms using this quirk. */ ATA_OUTL(ctlr->r_mem, AHCI_CAP, ctlr->caps); } return (0); } int ahci_attach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int error, i, speed, unit; uint32_t u, version; device_t child; ctlr->dev = dev; ctlr->ccc = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "ccc", &ctlr->ccc); mtx_init(&ctlr->ch_mtx, "AHCI channels lock", NULL, MTX_DEF); /* Setup our own memory management for channels. */ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { ahci_free_mem(dev); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { ahci_free_mem(dev); rman_fini(&ctlr->sc_iomem); return (error); } /* Get the HW capabilities */ version = ATA_INL(ctlr->r_mem, AHCI_VS); ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP); if (version >= 0x00010200) ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2); if (ctlr->caps & AHCI_CAP_EMS) ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL); if (ctlr->quirks & AHCI_Q_FORCE_PI) { /* * Enable ports. * The spec says that BIOS sets up bits corresponding to * available ports. On platforms where this information * is missing, the driver can define available ports on its own. 
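 *
 * For example, a CAP.NP field of 3 means four ports, so the mask
 * computed below and written to PI is (1 << 4) - 1 == 0x0f.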
*/ int nports = (ctlr->caps & AHCI_CAP_NPMASK) + 1; int nmask = (1 << nports) - 1; ATA_OUTL(ctlr->r_mem, AHCI_PI, nmask); device_printf(dev, "Forcing PI to %d ports (mask = %x)\n", nports, nmask); } ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI); /* Identify and set separate quirks for HBA and RAID f/w Marvells. */ if ((ctlr->quirks & AHCI_Q_ALTSIG) && (ctlr->caps & AHCI_CAP_SPM) == 0) ctlr->quirks |= AHCI_Q_NOBSYRES; if (ctlr->quirks & AHCI_Q_1CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->ichannels &= 0x01; } if (ctlr->quirks & AHCI_Q_2CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->caps |= 1; ctlr->ichannels &= 0x03; } if (ctlr->quirks & AHCI_Q_4CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->caps |= 3; ctlr->ichannels &= 0x0f; } ctlr->channels = MAX(flsl(ctlr->ichannels), (ctlr->caps & AHCI_CAP_NPMASK) + 1); if (ctlr->quirks & AHCI_Q_NOPMP) ctlr->caps &= ~AHCI_CAP_SPM; if (ctlr->quirks & AHCI_Q_NONCQ) ctlr->caps &= ~AHCI_CAP_SNCQ; if ((ctlr->caps & AHCI_CAP_CCCS) == 0) ctlr->ccc = 0; ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC); /* Create controller-wide DMA tag. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, (ctlr->caps & AHCI_CAP_64BIT) ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, ctlr->dma_coherent ? BUS_DMA_COHERENT : 0, NULL, NULL, &ctlr->dma_tag)) { ahci_free_mem(dev); rman_fini(&ctlr->sc_iomem); return (ENXIO); } ahci_ctlr_setup(dev); /* Setup interrupts. */ if ((error = ahci_setup_interrupt(dev)) != 0) { bus_dma_tag_destroy(ctlr->dma_tag); ahci_free_mem(dev); rman_fini(&ctlr->sc_iomem); return (error); } i = 0; for (u = ctlr->ichannels; u != 0; u >>= 1) i += (u & 1); ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3)); resource_int_value(device_get_name(dev), device_get_unit(dev), "direct", &ctlr->direct); /* Announce HW capabilities. */ speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT; device_printf(dev, "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n", ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f), ((version >> 4) & 0xf0) + (version & 0x0f), (ctlr->caps & AHCI_CAP_NPMASK) + 1, ((speed == 1) ? "1.5":((speed == 2) ? "3": ((speed == 3) ? "6":"?"))), (ctlr->caps & AHCI_CAP_SPM) ? "supported" : "not supported", (ctlr->caps & AHCI_CAP_FBSS) ? " with FBS" : ""); if (ctlr->quirks != 0) { device_printf(dev, "quirks=0x%b\n", ctlr->quirks, AHCI_Q_BIT_STRING); } if (bootverbose) { device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps", (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"", (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"", (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"", (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"", (ctlr->caps & AHCI_CAP_SSS) ? " SS":"", (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"", (ctlr->caps & AHCI_CAP_SAL) ? " AL":"", (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"", ((speed == 1) ? "1.5":((speed == 2) ? "3": ((speed == 3) ? "6":"?")))); printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n", (ctlr->caps & AHCI_CAP_SAM) ? " AM":"", (ctlr->caps & AHCI_CAP_SPM) ? " PM":"", (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"", (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"", (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"", (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"", ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1, (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"", (ctlr->caps & AHCI_CAP_EMS) ? " EM":"", (ctlr->caps & AHCI_CAP_SXS) ? " eSATA":"", (ctlr->caps & AHCI_CAP_NPMASK) + 1); } if (bootverbose && version >= 0x00010200) { device_printf(dev, "Caps2:%s%s%s%s%s%s\n", (ctlr->caps2 & AHCI_CAP2_DESO) ? 
" DESO":"", (ctlr->caps2 & AHCI_CAP2_SADM) ? " SADM":"", (ctlr->caps2 & AHCI_CAP2_SDS) ? " SDS":"", (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"", (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"", (ctlr->caps2 & AHCI_CAP2_BOH) ? " BOH":""); } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "ahcich", -1); if (child == NULL) { device_printf(dev, "failed to add channel device\n"); continue; } device_set_ivars(child, (void *)(intptr_t)unit); if ((ctlr->ichannels & (1 << unit)) == 0) device_disable(child); } /* Attach any remapped NVME device */ for (; unit < ctlr->channels + ctlr->remapped_devices; unit++) { child = device_add_child(dev, "nvme", -1); if (child == NULL) { device_printf(dev, "failed to add remapped NVMe device"); continue; } device_set_ivars(child, (void *)(intptr_t)(unit | AHCI_REMAPPED_UNIT)); } if (ctlr->caps & AHCI_CAP_EMS) { child = device_add_child(dev, "ahciem", -1); if (child == NULL) device_printf(dev, "failed to add enclosure device\n"); else device_set_ivars(child, (void *)(intptr_t)AHCI_EM_UNIT); } bus_generic_attach(dev); return (0); } int ahci_detach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int i; /* Detach & delete all children */ device_delete_children(dev); /* Free interrupts. */ for (i = 0; i < ctlr->numirqs; i++) { if (ctlr->irqs[i].r_irq) { bus_teardown_intr(dev, ctlr->irqs[i].r_irq, ctlr->irqs[i].handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq); } } bus_dma_tag_destroy(ctlr->dma_tag); /* Free memory. */ rman_fini(&ctlr->sc_iomem); ahci_free_mem(dev); mtx_destroy(&ctlr->ch_mtx); return (0); } void ahci_free_mem(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); /* Release memory resources */ if (ctlr->r_mem) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); if (ctlr->r_msix_table) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_msix_tab_rid, ctlr->r_msix_table); if (ctlr->r_msix_pba) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_msix_pba_rid, ctlr->r_msix_pba); ctlr->r_msix_pba = ctlr->r_mem = ctlr->r_msix_table = NULL; } int ahci_setup_interrupt(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int i; /* Check for single MSI vector fallback. */ if (ctlr->numirqs > 1 && (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) { device_printf(dev, "Falling back to one MSI\n"); ctlr->numirqs = 1; } /* Ensure we don't overrun irqs. */ if (ctlr->numirqs > AHCI_MAX_IRQS) { device_printf(dev, "Too many irqs %d > %d (clamping)\n", ctlr->numirqs, AHCI_MAX_IRQS); ctlr->numirqs = AHCI_MAX_IRQS; } /* Allocate all IRQs. */ for (i = 0; i < ctlr->numirqs; i++) { ctlr->irqs[i].ctlr = ctlr; ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 1 : 0); if (ctlr->channels == 1 && !ctlr->ccc && ctlr->msi) ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; else if (ctlr->numirqs == 1 || i >= ctlr->channels || (ctlr->ccc && i == ctlr->cccv)) ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL; else if (ctlr->channels > ctlr->numirqs && i == ctlr->numirqs - 1) ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER; else ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return (ENXIO); } if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL, (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr : ((ctlr->quirks & AHCI_Q_EDGEIS) ? 
ahci_intr_one_edge : ahci_intr_one), &ctlr->irqs[i], &ctlr->irqs[i].handle))) { /* SOS XXX release r_irq */ device_printf(dev, "unable to setup interrupt\n"); return (ENXIO); } if (ctlr->numirqs > 1) { bus_describe_intr(dev, ctlr->irqs[i].r_irq, ctlr->irqs[i].handle, ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ? "ch%d" : "%d", i); } } return (0); } /* * Common case interrupt handler. */ static void ahci_intr(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; u_int32_t is, ise = 0; void *arg; int unit; if (irq->mode == AHCI_IRQ_MODE_ALL) { unit = 0; if (ctlr->ccc) is = ctlr->ichannels; else is = ATA_INL(ctlr->r_mem, AHCI_IS); } else { /* AHCI_IRQ_MODE_AFTER */ unit = irq->r_irq_rid - 1; is = ATA_INL(ctlr->r_mem, AHCI_IS); is &= (0xffffffff << unit); } /* CCC interrupt is edge triggered. */ if (ctlr->ccc) ise = 1 << ctlr->cccv; /* Some controllers have edge triggered IS. */ if (ctlr->quirks & AHCI_Q_EDGEIS) ise |= is; if (ise != 0) ATA_OUTL(ctlr->r_mem, AHCI_IS, ise); for (; unit < ctlr->channels; unit++) { if ((is & (1 << unit)) != 0 && (arg = ctlr->interrupt[unit].argument)) { ctlr->interrupt[unit].function(arg); } } for (; unit < ctlr->channels + ctlr->remapped_devices; unit++) { if ((arg = ctlr->interrupt[unit].argument)) { ctlr->interrupt[unit].function(arg); } } /* AHCI declares level triggered IS. */ if (!(ctlr->quirks & AHCI_Q_EDGEIS)) ATA_OUTL(ctlr->r_mem, AHCI_IS, is); ATA_RBL(ctlr->r_mem, AHCI_IS); } /* * Simplified interrupt handler for multivector MSI mode. */ static void ahci_intr_one(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; void *arg; int unit; unit = irq->r_irq_rid - 1; if ((arg = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(arg); /* AHCI declares level triggered IS. */ ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); ATA_RBL(ctlr->r_mem, AHCI_IS); } static void ahci_intr_one_edge(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; void *arg; int unit; unit = irq->r_irq_rid - 1; /* Some controllers have edge triggered IS. 
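 *
 * Clear the status bit before running the channel handler, the
 * reverse of the level-triggered path above, so that an event
 * arriving while the handler runs re-asserts the edge interrupt
 * instead of being lost.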
*/ ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); if ((arg = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(arg); ATA_RBL(ctlr->r_mem, AHCI_IS); } struct resource * ahci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ahci_controller *ctlr = device_get_softc(dev); struct resource *res; rman_res_t st; int offset, size, unit; bool is_em, is_remapped; unit = (intptr_t)device_get_ivars(child); is_em = is_remapped = false; if (unit & AHCI_REMAPPED_UNIT) { unit &= AHCI_UNIT; unit -= ctlr->channels; is_remapped = true; } else if (unit & AHCI_EM_UNIT) { unit &= AHCI_UNIT; is_em = true; } res = NULL; switch (type) { case SYS_RES_MEMORY: if (is_remapped) { offset = ctlr->remap_offset + unit * ctlr->remap_size; size = ctlr->remap_size; } else if (!is_em) { offset = AHCI_OFFSET + (unit << 7); size = 128; } else if (*rid == 0) { offset = AHCI_EM_CTL; size = 4; } else { offset = (ctlr->emloc & 0xffff0000) >> 14; size = (ctlr->emloc & 0x0000ffff) << 2; if (*rid != 1) { if (*rid == 2 && (ctlr->capsem & (AHCI_EM_XMT | AHCI_EM_SMB)) == 0) offset += size; else break; } } st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + size - 1, size, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, 128, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irqs[0].r_irq; break; } return (res); } int ahci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { switch (type) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: if (rid != ATA_IRQ_RID) return (ENOENT); return (0); } return (EINVAL); } int ahci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct ahci_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child) & AHCI_UNIT; if (filter != NULL) { printf("ahci.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } int ahci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct ahci_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child) & AHCI_UNIT; ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } int ahci_print_child(device_t dev, device_t child) { intptr_t ivars; int retval; retval = bus_print_child_header(dev, child); ivars = (intptr_t)device_get_ivars(child); if ((ivars & AHCI_EM_UNIT) == 0) retval += printf(" at channel %d", (int)ivars & AHCI_UNIT); retval += bus_print_child_footer(dev, child); return (retval); } int -ahci_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) +ahci_child_location(device_t dev, device_t child, struct sbuf *sb) { intptr_t ivars; ivars = (intptr_t)device_get_ivars(child); if ((ivars & AHCI_EM_UNIT) == 0) - snprintf(buf, buflen, "channel=%d", (int)ivars & AHCI_UNIT); + sbuf_printf(sb, "channel=%d", (int)ivars & AHCI_UNIT); return (0); } bus_dma_tag_t ahci_get_dma_tag(device_t dev, device_t child) { struct ahci_controller *ctlr = device_get_softc(dev); return (ctlr->dma_tag); } void 
ahci_attached(device_t dev, struct ahci_channel *ch) { struct ahci_controller *ctlr = device_get_softc(dev); mtx_lock(&ctlr->ch_mtx); ctlr->ch[ch->unit] = ch; mtx_unlock(&ctlr->ch_mtx); } void ahci_detached(device_t dev, struct ahci_channel *ch) { struct ahci_controller *ctlr = device_get_softc(dev); mtx_lock(&ctlr->ch_mtx); mtx_lock(&ch->mtx); ctlr->ch[ch->unit] = NULL; mtx_unlock(&ch->mtx); mtx_unlock(&ctlr->ch_mtx); } struct ahci_channel * ahci_getch(device_t dev, int n) { struct ahci_controller *ctlr = device_get_softc(dev); struct ahci_channel *ch; KASSERT(n >= 0 && n < AHCI_MAX_PORTS, ("Bad channel number %d", n)); mtx_lock(&ctlr->ch_mtx); ch = ctlr->ch[n]; if (ch != NULL) mtx_lock(&ch->mtx); mtx_unlock(&ctlr->ch_mtx); return (ch); } void ahci_putch(struct ahci_channel *ch) { mtx_unlock(&ch->mtx); } static int ahci_ch_probe(device_t dev) { device_set_desc_copy(dev, "AHCI channel"); return (BUS_PROBE_DEFAULT); } static int ahci_ch_disablephy_proc(SYSCTL_HANDLER_ARGS) { struct ahci_channel *ch; int error, value; ch = arg1; value = ch->disablephy; error = sysctl_handle_int(oidp, &value, 0, req); if (error != 0 || req->newptr == NULL || (value != 0 && value != 1)) return (error); mtx_lock(&ch->mtx); ch->disablephy = value; if (value) { ahci_ch_deinit(ch->dev); } else { ahci_ch_init(ch->dev); ahci_phy_check_events(ch, ATA_SE_PHY_CHANGED | ATA_SE_EXCHANGED); } mtx_unlock(&ch->mtx); return (0); } static int ahci_ch_attach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ahci_channel *ch = device_get_softc(dev); struct cam_devq *devq; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; int rid, error, i, sata_rev = 0; u_int32_t version; ch->dev = dev; ch->unit = (intptr_t)device_get_ivars(dev); ch->caps = ctlr->caps; ch->caps2 = ctlr->caps2; ch->start = ctlr->ch_start; ch->quirks = ctlr->quirks; ch->vendorid = ctlr->vendorid; ch->deviceid = ctlr->deviceid; ch->subvendorid = ctlr->subvendorid; ch->subdeviceid = ctlr->subdeviceid; ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1; mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF); ch->pm_level = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "pm_level", &ch->pm_level); STAILQ_INIT(&ch->doneq); if (ch->pm_level > 3) callout_init_mtx(&ch->pm_timer, &ch->mtx, 0); callout_init_mtx(&ch->reset_timer, &ch->mtx, 0); /* JMicron external ports (0) sometimes limited */ if ((ctlr->quirks & AHCI_Q_SATA1_UNIT0) && ch->unit == 0) sata_rev = 1; if (ch->quirks & AHCI_Q_SATA2) sata_rev = 2; resource_int_value(device_get_name(dev), device_get_unit(dev), "sata_rev", &sata_rev); for (i = 0; i < 16; i++) { ch->user[i].revision = sata_rev; ch->user[i].mode = 0; ch->user[i].bytecount = 8192; ch->user[i].tags = ch->numslots; ch->user[i].caps = 0; ch->curr[i] = ch->user[i]; if (ch->pm_level) { ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ | CTS_SATA_CAPS_H_APST | CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST; } ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA | CTS_SATA_CAPS_H_AN; } rid = 0; if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE))) return (ENXIO); ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD); version = ATA_INL(ctlr->r_mem, AHCI_VS); if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS)) ch->chcaps |= AHCI_P_CMD_FBSCP; if (ch->caps2 & AHCI_CAP2_SDS) ch->chscaps = ATA_INL(ch->r_mem, AHCI_P_DEVSLP); if (bootverbose) { device_printf(dev, "Caps:%s%s%s%s%s%s\n", (ch->chcaps & AHCI_P_CMD_HPCP) ? " HPCP":"", (ch->chcaps & AHCI_P_CMD_MPSP) ? 
" MPSP":"", (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"", (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"", (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":"", (ch->chscaps & AHCI_P_DEVSLP_DSP) ? " DSP":""); } ahci_dmainit(dev); ahci_slotsalloc(dev); mtx_lock(&ch->mtx); ahci_ch_init(dev); rid = ATA_IRQ_RID; if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "Unable to map interrupt\n"); error = ENXIO; goto err0; } if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr, ch, &ch->ih))) { device_printf(dev, "Unable to setup interrupt\n"); error = ENXIO; goto err1; } /* Create the device queue for our SIM. */ devq = cam_simq_alloc(ch->numslots); if (devq == NULL) { device_printf(dev, "Unable to allocate simq\n"); error = ENOMEM; goto err1; } /* Construct SIM entry */ ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch, device_get_unit(dev), (struct mtx *)&ch->mtx, (ch->quirks & AHCI_Q_NOCCS) ? 1 : min(2, ch->numslots), (ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0, devq); if (ch->sim == NULL) { cam_simq_free(devq); device_printf(dev, "unable to allocate sim\n"); error = ENOMEM; goto err1; } if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "unable to register xpt bus\n"); error = ENXIO; goto err2; } if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "unable to create path\n"); error = ENXIO; goto err3; } if (ch->pm_level > 3) { callout_reset(&ch->pm_timer, (ch->pm_level == 4) ? hz / 1000 : hz / 8, ahci_ch_pm, ch); } mtx_unlock(&ch->mtx); ahci_attached(device_get_parent(dev), ch); ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "disable_phy", CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_NEEDGIANT, ch, 0, ahci_ch_disablephy_proc, "IU", "Disable PHY"); return (0); err3: xpt_bus_deregister(cam_sim_path(ch->sim)); err2: cam_sim_free(ch->sim, /*free_devq*/TRUE); err1: bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); err0: bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_unlock(&ch->mtx); mtx_destroy(&ch->mtx); return (error); } static int ahci_ch_detach(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); ahci_detached(device_get_parent(dev), ch); mtx_lock(&ch->mtx); xpt_async(AC_LOST_DEVICE, ch->path, NULL); /* Forget about reset. 
*/ if (ch->resetting) { ch->resetting = 0; xpt_release_simq(ch->sim, TRUE); } xpt_free_path(ch->path); xpt_bus_deregister(cam_sim_path(ch->sim)); cam_sim_free(ch->sim, /*free_devq*/TRUE); mtx_unlock(&ch->mtx); if (ch->pm_level > 3) callout_drain(&ch->pm_timer); callout_drain(&ch->reset_timer); bus_teardown_intr(dev, ch->r_irq, ch->ih); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); ahci_ch_deinit(dev); ahci_slotsfree(dev); ahci_dmafini(dev); bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_destroy(&ch->mtx); return (0); } static int ahci_ch_init(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); uint64_t work; /* Disable port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Setup work areas */ work = ch->dma.work_bus + AHCI_CL_OFFSET; ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff); ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32); work = ch->dma.rfis_bus; ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff); ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32); /* Activate the channel and power/spin up device */ ATA_OUTL(ch->r_mem, AHCI_P_CMD, (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD | ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) | ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 ))); ahci_start_fr(ch); ahci_start(ch, 1); return (0); } static int ahci_ch_deinit(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); /* Disable port interrupts. */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Reset command register. */ ahci_stop(ch); ahci_stop_fr(ch); ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0); /* Allow everything, including partial and slumber modes. */ ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0); /* Request slumber mode transition and give some time to get there. */ ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER); DELAY(100); /* Disable PHY. */ ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); return (0); } static int ahci_ch_suspend(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_freeze_simq(ch->sim, 1); /* Forget about reset. */ if (ch->resetting) { ch->resetting = 0; callout_stop(&ch->reset_timer); xpt_release_simq(ch->sim, TRUE); } while (ch->oslots) msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100); ahci_ch_deinit(dev); mtx_unlock(&ch->mtx); return (0); } static int ahci_ch_resume(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); ahci_ch_init(dev); ahci_reset(ch); xpt_release_simq(ch->sim, TRUE); mtx_unlock(&ch->mtx); return (0); } devclass_t ahcich_devclass; static device_method_t ahcich_methods[] = { DEVMETHOD(device_probe, ahci_ch_probe), DEVMETHOD(device_attach, ahci_ch_attach), DEVMETHOD(device_detach, ahci_ch_detach), DEVMETHOD(device_suspend, ahci_ch_suspend), DEVMETHOD(device_resume, ahci_ch_resume), DEVMETHOD_END }; static driver_t ahcich_driver = { "ahcich", ahcich_methods, sizeof(struct ahci_channel) }; DRIVER_MODULE(ahcich, ahci, ahcich_driver, ahcich_devclass, NULL, NULL); struct ahci_dc_cb_args { bus_addr_t maddr; int error; }; static void ahci_dmainit(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); struct ahci_dc_cb_args dcba; size_t rfsize; int error; /* Command area. 
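 *
 * AHCI requires the command list to be 1KB-aligned, hence the
 * 1024-byte alignment on this tag; the FIS receive area below needs
 * 256 bytes, or 4KB when FIS-based switching is supported.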
*/ error = bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE, 0, NULL, NULL, &ch->dma.work_tag); if (error != 0) goto error; error = bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, BUS_DMA_ZERO, &ch->dma.work_map); if (error != 0) goto error; error = bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work, AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT); if (error != 0 || (error = dcba.error) != 0) { bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); goto error; } ch->dma.work_bus = dcba.maddr; /* FIS receive area. */ if (ch->chcaps & AHCI_P_CMD_FBSCP) rfsize = 4096; else rfsize = 256; error = bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rfsize, 1, rfsize, 0, NULL, NULL, &ch->dma.rfis_tag); if (error != 0) goto error; error = bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0, &ch->dma.rfis_map); if (error != 0) goto error; error = bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis, rfsize, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT); if (error != 0 || (error = dcba.error) != 0) { bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); goto error; } ch->dma.rfis_bus = dcba.maddr; /* Data area. */ error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, AHCI_SG_ENTRIES * PAGE_SIZE, AHCI_SG_ENTRIES, AHCI_PRD_MAX, 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag); if (error != 0) goto error; return; error: device_printf(dev, "WARNING - DMA initialization failed, error %d\n", error); ahci_dmafini(dev); } static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct ahci_dc_cb_args *dcba = (struct ahci_dc_cb_args *)xsc; if (!(dcba->error = error)) dcba->maddr = segs[0].ds_addr; } static void ahci_dmafini(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); if (ch->dma.data_tag) { bus_dma_tag_destroy(ch->dma.data_tag); ch->dma.data_tag = NULL; } if (ch->dma.rfis_bus) { bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map); bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); ch->dma.rfis_bus = 0; ch->dma.rfis = NULL; } if (ch->dma.work_bus) { bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map); bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); ch->dma.work_bus = 0; ch->dma.work = NULL; } if (ch->dma.work_tag) { bus_dma_tag_destroy(ch->dma.work_tag); ch->dma.work_tag = NULL; } } static void ahci_slotsalloc(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); int i; /* Alloc and setup command/dma slots */ bzero(ch->slot, sizeof(ch->slot)); for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; slot->ch = ch; slot->slot = i; slot->state = AHCI_SLOT_EMPTY; slot->ct_offset = AHCI_CT_OFFSET + AHCI_CT_SIZE * i; slot->ccb = NULL; callout_init_mtx(&slot->timeout, &ch->mtx, 0); if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map)) device_printf(ch->dev, "FAILURE - create data_map\n"); } } static void ahci_slotsfree(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); int i; /* Free all dma slots */ for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; callout_drain(&slot->timeout); if (slot->dma.data_map) { bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map); slot->dma.data_map = NULL; } } } static int ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr) { if 
(((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) || ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) { u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS); union ccb *ccb; if (bootverbose) { if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) device_printf(ch->dev, "CONNECT requested\n"); else device_printf(ch->dev, "DISCONNECT requested\n"); } ahci_reset(ch); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return (0); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return (0); } xpt_rescan(ccb); return (1); } return (0); } static void ahci_cpd_check_events(struct ahci_channel *ch) { u_int32_t status; union ccb *ccb; device_t dev; if (ch->pm_level == 0) return; status = ATA_INL(ch->r_mem, AHCI_P_CMD); if ((status & AHCI_P_CMD_CPD) == 0) return; if (bootverbose) { dev = ch->dev; if (status & AHCI_P_CMD_CPS) { device_printf(dev, "COLD CONNECT requested\n"); } else device_printf(dev, "COLD DISCONNECT requested\n"); } ahci_reset(ch); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void ahci_notify_events(struct ahci_channel *ch, u_int32_t status) { struct cam_path *dpath; int i; if (ch->caps & AHCI_CAP_SSNTF) ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status); if (bootverbose) device_printf(ch->dev, "SNTF 0x%04x\n", status); for (i = 0; i < 16; i++) { if ((status & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, NULL, xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) { xpt_async(AC_SCSI_AEN, dpath, NULL); xpt_free_path(dpath); } } } static void ahci_done(struct ahci_channel *ch, union ccb *ccb) { mtx_assert(&ch->mtx, MA_OWNED); if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 || ch->batch == 0) { xpt_done(ccb); return; } STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe); } static void ahci_ch_intr(void *arg) { struct ahci_channel *ch = (struct ahci_channel *)arg; uint32_t istatus; /* Read interrupt statuses. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); mtx_lock(&ch->mtx); ahci_ch_intr_main(ch, istatus); mtx_unlock(&ch->mtx); } static void ahci_ch_intr_direct(void *arg) { struct ahci_channel *ch = (struct ahci_channel *)arg; struct ccb_hdr *ccb_h; uint32_t istatus; STAILQ_HEAD(, ccb_hdr) tmp_doneq = STAILQ_HEAD_INITIALIZER(tmp_doneq); /* Read interrupt statuses. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); mtx_lock(&ch->mtx); ch->batch = 1; ahci_ch_intr_main(ch, istatus); ch->batch = 0; /* * Prevent the possibility of issues caused by processing the queue * while unlocked below by moving the contents to a local queue. */ STAILQ_CONCAT(&tmp_doneq, &ch->doneq); mtx_unlock(&ch->mtx); while ((ccb_h = STAILQ_FIRST(&tmp_doneq)) != NULL) { STAILQ_REMOVE_HEAD(&tmp_doneq, sim_links.stqe); xpt_done_direct((union ccb *)ccb_h); } } static void ahci_ch_pm(void *arg) { struct ahci_channel *ch = (struct ahci_channel *)arg; uint32_t work; if (ch->numrslots != 0) return; work = ATA_INL(ch->r_mem, AHCI_P_CMD); if (ch->pm_level == 4) work |= AHCI_P_CMD_PARTIAL; else work |= AHCI_P_CMD_SLUMBER; ATA_OUTL(ch->r_mem, AHCI_P_CMD, work); } static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus) { uint32_t cstatus, serr = 0, sntf = 0, ok, err; enum ahci_err_type et; int i, ccs, port, reset = 0; /* Clear interrupt statuses. */ ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus); /* Read command statuses. 
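 * PxSACT is read only when NCQ commands are outstanding and PxCI only when
 * non-NCQ ones are; a bit still set below means that command has not
 * completed yet.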
*/ if (ch->numtslots != 0) cstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); else cstatus = 0; if (ch->numrslots != ch->numtslots) cstatus |= ATA_INL(ch->r_mem, AHCI_P_CI); /* Read SNTF in one of possible ways. */ if ((istatus & AHCI_P_IX_SDB) && (ch->pm_present || ch->curr[0].atapi != 0)) { if (ch->caps & AHCI_CAP_SSNTF) sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF); else if (ch->fbs_enabled) { u_int8_t *fis = ch->dma.rfis + 0x58; for (i = 0; i < 16; i++) { if (fis[1] & 0x80) { fis[1] &= 0x7f; sntf |= 1 << i; } fis += 256; } } else { u_int8_t *fis = ch->dma.rfis + 0x58; if (fis[1] & 0x80) sntf = (1 << (fis[1] & 0x0f)); } } /* Process PHY events */ if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF | AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { serr = ATA_INL(ch->r_mem, AHCI_P_SERR); if (serr) { ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr); reset = ahci_phy_check_events(ch, serr); } } /* Process cold presence detection events */ if ((istatus & AHCI_P_IX_CPD) && !reset) ahci_cpd_check_events(ch); /* Process command errors */ if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { if (ch->quirks & AHCI_Q_NOCCS) { /* * ASMedia chips sometimes report failed commands as * completed. Count all running commands as failed. */ cstatus |= ch->rslots; /* They also report wrong CCS, so try to guess one. */ ccs = powerof2(cstatus) ? ffs(cstatus) - 1 : -1; } else { ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT; } //device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n", // __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), // serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs); port = -1; if (ch->fbs_enabled) { uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS); if (fbs & AHCI_P_FBS_SDE) { port = (fbs & AHCI_P_FBS_DWE) >> AHCI_P_FBS_DWE_SHIFT; } else { for (i = 0; i < 16; i++) { if (ch->numrslotspd[i] == 0) continue; if (port == -1) port = i; else if (port != i) { port = -2; break; } } } } err = ch->rslots & cstatus; } else { ccs = 0; err = 0; port = -1; } /* Complete all successful commands. */ ok = ch->rslots & ~cstatus; for (i = 0; i < ch->numslots; i++) { if ((ok >> i) & 1) ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE); } /* On error, complete the rest of commands with error statuses. */ if (err) { if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } ahci_done(ch, fccb); } for (i = 0; i < ch->numslots; i++) { /* XXX: requests in loading state. */ if (((err >> i) & 1) == 0) continue; if (port >= 0 && ch->slot[i].ccb->ccb_h.target_id != port) continue; if (istatus & AHCI_P_IX_TFE) { if (port != -2) { /* Task File Error */ if (ch->numtslotspd[ ch->slot[i].ccb->ccb_h.target_id] == 0) { /* Untagged operation. */ if (i == ccs) et = AHCI_ERR_TFE; else et = AHCI_ERR_INNOCENT; } else { /* Tagged operation. */ et = AHCI_ERR_NCQ; } } else { et = AHCI_ERR_TFE; ch->fatalerr = 1; } } else if (istatus & AHCI_P_IX_IF) { if (ch->numtslots == 0 && i != ccs && port != -2) et = AHCI_ERR_INNOCENT; else et = AHCI_ERR_SATA; } else et = AHCI_ERR_INVALID; ahci_end_transaction(&ch->slot[i], et); } /* * We can't reinit port if there are some other * commands active, use resume to complete them.
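 * Writing AHCI_P_FBS_DEC (device error clear) below lets the HBA resume
 * the commands still queued for other port multiplier targets.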
*/ if (ch->rslots != 0 && !ch->recoverycmd) ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC); } /* Process NOTIFY events */ if (sntf) ahci_notify_events(ch, sntf); } /* Must be called with channel locked. */ static int ahci_check_collision(struct ahci_channel *ch, union ccb *ccb) { int t = ccb->ccb_h.target_id; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { /* Tagged command while we have no supported tag free. */ if (((~ch->oslots) & (0xffffffff >> (32 - ch->curr[t].tags))) == 0) return (1); /* If we have FBS */ if (ch->fbs_enabled) { /* Tagged command while untagged are active. */ if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0) return (1); } else { /* Tagged command while untagged are active. */ if (ch->numrslots != 0 && ch->numtslots == 0) return (1); /* Tagged command while tagged to other target is active. */ if (ch->numtslots != 0 && ch->taggedtarget != ccb->ccb_h.target_id) return (1); } } else { /* If we have FBS */ if (ch->fbs_enabled) { /* Untagged command while tagged are active. */ if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0) return (1); } else { /* Untagged command while tagged are active. */ if (ch->numrslots != 0 && ch->numtslots != 0) return (1); } } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) { /* Atomic command while anything active. */ if (ch->numrslots != 0) return (1); } /* We have some atomic command running. */ if (ch->aslots != 0) return (1); return (0); } /* Must be called with channel locked. */ static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb) { struct ahci_slot *slot; int tag, tags; /* Choose empty slot. */ tags = ch->numslots; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) tags = ch->curr[ccb->ccb_h.target_id].tags; if (ch->lastslot + 1 < tags) tag = ffs(~(ch->oslots >> (ch->lastslot + 1))); else tag = 0; if (tag == 0 || tag + ch->lastslot >= tags) tag = ffs(~ch->oslots) - 1; else tag += ch->lastslot; ch->lastslot = tag; /* Occupy chosen slot. */ slot = &ch->slot[tag]; slot->ccb = ccb; /* Stop PM timer. */ if (ch->numrslots == 0 && ch->pm_level > 3) callout_stop(&ch->pm_timer); /* Update channel stats. */ ch->oslots |= (1 << tag); ch->numrslots++; ch->numrslotspd[ccb->ccb_h.target_id]++; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots++; ch->numtslotspd[ccb->ccb_h.target_id]++; ch->taggedtarget = ccb->ccb_h.target_id; } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) ch->aslots |= (1 << tag); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { slot->state = AHCI_SLOT_LOADING; bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, ahci_dmasetprd, slot, 0); } else { slot->dma.nsegs = 0; ahci_execute_transaction(slot); } } /* Locked by busdma engine. 
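 * The data DMA tag was created with busdma_lock_mutex and ch->mtx, so a
 * deferred callback acquires the channel lock before running.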
*/ static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct ahci_slot *slot = arg; struct ahci_channel *ch = slot->ch; struct ahci_cmd_tab *ctp; struct ahci_dma_prd *prd; int i; if (error) { device_printf(ch->dev, "DMA load error\n"); ahci_end_transaction(slot, AHCI_ERR_INVALID); return; } KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n")); /* Get a piece of the workspace for this request */ ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset); /* Fill S/G table */ prd = &ctp->prd_tab[0]; for (i = 0; i < nsegs; i++) { prd[i].dba = htole64(segs[i].ds_addr); prd[i].dbc = htole32((segs[i].ds_len - 1) & AHCI_PRD_MASK); } slot->dma.nsegs = nsegs; bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); ahci_execute_transaction(slot); } /* Must be called with channel locked. */ static void ahci_execute_transaction(struct ahci_slot *slot) { struct ahci_channel *ch = slot->ch; struct ahci_cmd_tab *ctp; struct ahci_cmd_list *clp; union ccb *ccb = slot->ccb; int port = ccb->ccb_h.target_id & 0x0f; int fis_size, i, softreset; uint8_t *fis = ch->dma.rfis + 0x40; uint8_t val; uint16_t cmd_flags; /* Get a piece of the workspace for this request */ ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset); /* Setup the FIS for this request */ if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) { device_printf(ch->dev, "Setting up SATA FIS failed\n"); ahci_end_transaction(slot, AHCI_ERR_INVALID); return; } /* Setup the command list entry */ clp = (struct ahci_cmd_list *) (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); cmd_flags = (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) | (ccb->ccb_h.func_code == XPT_SCSI_IO ? (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) | (fis_size / sizeof(u_int32_t)) | (port << 12); clp->prd_length = htole16(slot->dma.nsegs); /* Special handling for Soft Reset command. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) { if (ccb->ataio.cmd.control & ATA_A_RESET) { softreset = 1; /* Kick controller into sane state */ ahci_stop(ch); ahci_clo(ch); ahci_start(ch, 0); cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY; } else { softreset = 2; /* Prepare FIS receive area for check. */ for (i = 0; i < 20; i++) fis[i] = 0xff; } } else softreset = 0; clp->bytecount = 0; clp->cmd_flags = htole16(cmd_flags); clp->cmd_table_phys = htole64(ch->dma.work_bus + slot->ct_offset); bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_PREREAD); /* Set ACTIVE bit for NCQ commands. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot); } /* If FBS is enabled, set PMP port. */ if (ch->fbs_enabled) { ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | (port << AHCI_P_FBS_DEV_SHIFT)); } /* Issue command to the controller. */ slot->state = AHCI_SLOT_RUNNING; ch->rslots |= (1 << slot->slot); ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot)); /* Device reset commands don't interrupt. Poll them.
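 * The loop below polls PxCI every 10us, so the CCB timeout (in
 * milliseconds) is multiplied by 100 to get the iteration count.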
*/ if (ccb->ccb_h.func_code == XPT_ATA_IO && (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) { int count, timeout = ccb->ccb_h.timeout * 100; enum ahci_err_type et = AHCI_ERR_NONE; for (count = 0; count < timeout; count++) { DELAY(10); if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot))) break; if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) && softreset != 1) { #if 0 device_printf(ch->dev, "Poll error on slot %d, TFD: %04x\n", slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD)); #endif et = AHCI_ERR_TFE; break; } /* Workaround for ATI SB600/SB700 chipsets. */ if (ccb->ccb_h.target_id == 15 && (ch->quirks & AHCI_Q_ATI_PMP_BUG) && (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) { et = AHCI_ERR_TIMEOUT; break; } } /* * Some Marvell controllers require additional time * after soft reset to work properly. Setup delay * to 50ms after soft reset. */ if (ch->quirks & AHCI_Q_MRVL_SR_DEL) DELAY(50000); /* * Marvell HBAs with non-RAID firmware do not wait for * readiness after soft reset, so we have to wait here. * Marvell RAIDs do not have this problem, but instead * sometimes forget to update FIS receive area, breaking * this wait. */ if ((ch->quirks & AHCI_Q_NOBSYRES) == 0 && (ch->quirks & AHCI_Q_ATI_PMP_BUG) == 0 && softreset == 2 && et == AHCI_ERR_NONE) { for ( ; count < timeout; count++) { bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_POSTREAD); val = fis[2]; bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_PREREAD); if ((val & ATA_S_BUSY) == 0) break; DELAY(10); } } if (timeout && (count >= timeout)) { device_printf(ch->dev, "Poll timeout on slot %d port %d\n", slot->slot, port); device_printf(ch->dev, "is %08x cs %08x ss %08x " "rs %08x tfd %02x serr %08x cmd %08x\n", ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI), ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR), ATA_INL(ch->r_mem, AHCI_P_CMD)); et = AHCI_ERR_TIMEOUT; } /* Kick controller into sane state and enable FBS. */ if (softreset == 2) ch->eslots |= (1 << slot->slot); ahci_end_transaction(slot, et); return; } /* Start command execution timeout */ callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout / 2, 0, ahci_timeout, slot, 0); return; } /* Must be called with channel locked. */ static void ahci_process_timeout(struct ahci_channel *ch) { int i; mtx_assert(&ch->mtx, MA_OWNED); /* Handle the rest of commands. */ for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; ahci_end_transaction(&ch->slot[i], AHCI_ERR_TIMEOUT); } } /* Must be called with channel locked. */ static void ahci_rearm_timeout(struct ahci_channel *ch) { int i; mtx_assert(&ch->mtx, MA_OWNED); for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; /* Do we have a running request on slot? */ if (slot->state < AHCI_SLOT_RUNNING) continue; if ((ch->toslots & (1 << i)) == 0) continue; callout_reset_sbt(&slot->timeout, SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, ahci_timeout, slot, 0); } } /* Locked by callout mechanism. */ static void ahci_timeout(void *arg) { struct ahci_slot *slot = arg; struct ahci_channel *ch = slot->ch; device_t dev = ch->dev; uint32_t sstatus; int ccs; int i; /* Check for stale timeout. */ if (slot->state < AHCI_SLOT_RUNNING) return; /* Check if slot was not being executed last time we checked. */ if (slot->state < AHCI_SLOT_EXECUTING) { /* Check if slot started executing. 
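 * PxSACT reporting the slot, or CCS pointing at it, means it is on the
 * wire; if CCS points at a slot we never issued, the controller's CCS
 * cannot be trusted and ch->wrongccs is set.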
*/ sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT; if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot || ch->fbs_enabled || ch->wrongccs) slot->state = AHCI_SLOT_EXECUTING; else if ((ch->rslots & (1 << ccs)) == 0) { ch->wrongccs = 1; slot->state = AHCI_SLOT_EXECUTING; } callout_reset_sbt(&slot->timeout, SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, ahci_timeout, slot, 0); return; } device_printf(dev, "Timeout on slot %d port %d\n", slot->slot, slot->ccb->ccb_h.target_id & 0x0f); device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x " "serr %08x cmd %08x\n", ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI), ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR), ATA_INL(ch->r_mem, AHCI_P_CMD)); /* Handle frozen command. */ if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } ahci_done(ch, fccb); } if (!ch->fbs_enabled && !ch->wrongccs) { /* Without FBS we know real timeout source. */ ch->fatalerr = 1; /* Handle command with timeout. */ ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT); /* Handle the rest of commands. */ for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); } } else { /* With FBS we wait for other commands timeout and pray. */ if (ch->toslots == 0) xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); if ((ch->rslots & ~ch->toslots) == 0) ahci_process_timeout(ch); else device_printf(dev, " ... waiting for slots %08x\n", ch->rslots & ~ch->toslots); } } /* Must be called with channel locked. */ static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et) { struct ahci_channel *ch = slot->ch; union ccb *ccb = slot->ccb; struct ahci_cmd_list *clp; int lastto; uint32_t sig; bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); clp = (struct ahci_cmd_list *) (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); /* Read result registers to the result struct * May be incorrect if several commands finished same time, * so read only when sure or have to. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { struct ata_res *res = &ccb->ataio.res; if ((et == AHCI_ERR_TFE) || (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) { u_int8_t *fis = ch->dma.rfis + 0x40; bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_POSTREAD); if (ch->fbs_enabled) { fis += ccb->ccb_h.target_id * 256; res->status = fis[2]; res->error = fis[3]; } else { uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD); res->status = tfd; res->error = tfd >> 8; } res->lba_low = fis[4]; res->lba_mid = fis[5]; res->lba_high = fis[6]; res->device = fis[7]; res->lba_low_exp = fis[8]; res->lba_mid_exp = fis[9]; res->lba_high_exp = fis[10]; res->sector_count = fis[12]; res->sector_count_exp = fis[13]; /* * Some weird controllers do not return signature in * FIS receive area. Read it from PxSIG register. 
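 * PxSIG holds the initial D2H register FIS contents: LBA high in bits
 * 31:24, LBA mid in 23:16, LBA low in 15:8 and sector count in 7:0.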
*/ if ((ch->quirks & AHCI_Q_ALTSIG) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET) == 0) { sig = ATA_INL(ch->r_mem, AHCI_P_SIG); res->lba_high = sig >> 24; res->lba_mid = sig >> 16; res->lba_low = sig >> 8; res->sector_count = sig; } } else bzero(res, sizeof(*res)); if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 && (ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && (ch->quirks & AHCI_Q_NOCOUNT) == 0) { ccb->ataio.resid = ccb->ataio.dxfer_len - le32toh(clp->bytecount); } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && (ch->quirks & AHCI_Q_NOCOUNT) == 0) { ccb->csio.resid = ccb->csio.dxfer_len - le32toh(clp->bytecount); } } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, (ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map); } if (et != AHCI_ERR_NONE) ch->eslots |= (1 << slot->slot); /* In case of error, freeze device for proper recovery. */ if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) && !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } /* Set proper result status. */ ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (et) { case AHCI_ERR_NONE: ccb->ccb_h.status |= CAM_REQ_CMP; if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.scsi_status = SCSI_STATUS_OK; break; case AHCI_ERR_INVALID: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_REQ_INVALID; break; case AHCI_ERR_INNOCENT: ccb->ccb_h.status |= CAM_REQUEUE_REQ; break; case AHCI_ERR_TFE: case AHCI_ERR_NCQ: if (ccb->ccb_h.func_code == XPT_SCSI_IO) { ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } else { ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; } break; case AHCI_ERR_SATA: ch->fatalerr = 1; if (!ch->recoverycmd) { xpt_freeze_simq(ch->sim, 1); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } ccb->ccb_h.status |= CAM_UNCOR_PARITY; break; case AHCI_ERR_TIMEOUT: if (!ch->recoverycmd) { xpt_freeze_simq(ch->sim, 1); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } ccb->ccb_h.status |= CAM_CMD_TIMEOUT; break; default: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } /* Free slot. */ ch->oslots &= ~(1 << slot->slot); ch->rslots &= ~(1 << slot->slot); ch->aslots &= ~(1 << slot->slot); slot->state = AHCI_SLOT_EMPTY; slot->ccb = NULL; /* Update channel stats. */ ch->numrslots--; ch->numrslotspd[ccb->ccb_h.target_id]--; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots--; ch->numtslotspd[ccb->ccb_h.target_id]--; } /* Cancel timeout state if request completed normally. */ if (et != AHCI_ERR_TIMEOUT) { lastto = (ch->toslots == (1 << slot->slot)); ch->toslots &= ~(1 << slot->slot); if (lastto) xpt_release_simq(ch->sim, TRUE); } /* If it was first request of reset sequence and there is no error, * proceed to second request. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET) && et == AHCI_ERR_NONE) { ccb->ataio.cmd.control &= ~ATA_A_RESET; ahci_begin_transaction(ch, ccb); return; } /* If it was our READ LOG command - process it. */ if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) { ahci_process_read_log(ch, ccb); /* If it was our REQUEST SENSE command - process it. 
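 * The sense data was transferred straight into the held CCB's sense
 * buffer by the recovery command, so only status needs to be propagated.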
*/ } else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) { ahci_process_request_sense(ch, ccb); /* If it was NCQ or ATAPI command error, put result on hold. */ } else if (et == AHCI_ERR_NCQ || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) { ch->hold[slot->slot] = ccb; ch->numhslots++; } else ahci_done(ch, ccb); /* If we have no other active commands, ... */ if (ch->rslots == 0) { /* if there was a fatal error - reset port. */ if (ch->toslots != 0 || ch->fatalerr) { ahci_reset(ch); } else { /* if we have slots in error, we can reinit port. */ if (ch->eslots != 0) { ahci_stop(ch); ahci_clo(ch); ahci_start(ch, 1); } /* if there are commands on hold, we can do READ LOG. */ if (!ch->recoverycmd && ch->numhslots) ahci_issue_recovery(ch); } /* If all the rest of commands are in timeout - give them a chance. */ } else if ((ch->rslots & ~ch->toslots) == 0 && et != AHCI_ERR_TIMEOUT) ahci_rearm_timeout(ch); /* Unfreeze frozen command. */ if (ch->frozen && !ahci_check_collision(ch, ch->frozen)) { union ccb *fccb = ch->frozen; ch->frozen = NULL; ahci_begin_transaction(ch, fccb); xpt_release_simq(ch->sim, TRUE); } /* Start PM timer. */ if (ch->numrslots == 0 && ch->pm_level > 3 && (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) { callout_schedule(&ch->pm_timer, (ch->pm_level == 4) ? hz / 1000 : hz / 8); } } static void ahci_issue_recovery(struct ahci_channel *ch) { union ccb *ccb; struct ccb_ataio *ataio; struct ccb_scsiio *csio; int i; /* Find some held command. */ for (i = 0; i < ch->numslots; i++) { if (ch->hold[i]) break; } ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { device_printf(ch->dev, "Unable to allocate recovery command\n"); completeall: /* We can't do anything -- complete held commands. */ for (i = 0; i < ch->numslots; i++) { if (ch->hold[i] == NULL) continue; ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } ahci_reset(ch); return; } ccb->ccb_h = ch->hold[i]->ccb_h; /* Reuse old header. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { /* READ LOG */ ccb->ccb_h.recovery_type = RECOVERY_READ_LOG; ccb->ccb_h.func_code = XPT_ATA_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ ataio = &ccb->ataio; ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT); if (ataio->data_ptr == NULL) { xpt_free_ccb(ccb); device_printf(ch->dev, "Unable to allocate memory for READ LOG command\n"); goto completeall; } ataio->dxfer_len = 512; bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = CAM_ATAIO_48BIT; ataio->cmd.command = 0x2F; /* READ LOG EXT */ ataio->cmd.sector_count = 1; ataio->cmd.sector_count_exp = 0; ataio->cmd.lba_low = 0x10; ataio->cmd.lba_mid = 0; ataio->cmd.lba_mid_exp = 0; } else { /* REQUEST SENSE */ ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE; ccb->ccb_h.recovery_slot = i; ccb->ccb_h.func_code = XPT_SCSI_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.status = 0; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ csio = &ccb->csio; csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data; csio->dxfer_len = ch->hold[i]->csio.sense_len; csio->cdb_len = 6; bzero(&csio->cdb_io, sizeof(csio->cdb_io)); csio->cdb_io.cdb_bytes[0] = 0x03; csio->cdb_io.cdb_bytes[4] = csio->dxfer_len; } /* Freeze SIM while doing recovery.
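 * Only one recovery command runs at a time; ahci_process_read_log() or
 * ahci_process_request_sense() releases this freeze on completion.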
*/ ch->recoverycmd = 1; xpt_freeze_simq(ch->sim, 1); ahci_begin_transaction(ch, ccb); } static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb) { uint8_t *data; struct ata_res *res; int i; ch->recoverycmd = 0; data = ccb->ataio.data_ptr; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (data[0] & 0x80) == 0) { for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) continue; if ((data[0] & 0x1F) == i) { res = &ch->hold[i]->ataio.res; res->status = data[2]; res->error = data[3]; res->lba_low = data[4]; res->lba_mid = data[5]; res->lba_high = data[6]; res->device = data[7]; res->lba_low_exp = data[8]; res->lba_mid_exp = data[9]; res->lba_high_exp = data[10]; res->sector_count = data[12]; res->sector_count_exp = data[13]; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; } ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } else { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) device_printf(ch->dev, "Error while READ LOG EXT\n"); else if ((data[0] & 0x80) == 0) { device_printf(ch->dev, "Non-queued command error in READ LOG EXT\n"); } for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) continue; ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } free(ccb->ataio.data_ptr, M_AHCI); xpt_free_ccb(ccb); xpt_release_simq(ch->sim, TRUE); } static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb) { int i; ch->recoverycmd = 0; i = ccb->ccb_h.recovery_slot; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; } ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; xpt_free_ccb(ccb); xpt_release_simq(ch->sim, TRUE); } static void ahci_start(struct ahci_channel *ch, int fbs) { u_int32_t cmd; /* Run the channel start callback, if any. */ if (ch->start) ch->start(ch); /* Clear SATA error register */ ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF); /* Clear any interrupts pending on this channel */ ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF); /* Configure FIS-based switching if supported. */ if (ch->chcaps & AHCI_P_CMD_FBSCP) { ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0; ATA_OUTL(ch->r_mem, AHCI_P_FBS, ch->fbs_enabled ? AHCI_P_FBS_EN : 0); } /* Start operations on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); cmd &= ~AHCI_P_CMD_PMA; ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST | (ch->pm_present ? AHCI_P_CMD_PMA : 0)); } static void ahci_stop(struct ahci_channel *ch) { u_int32_t cmd; int timeout; /* Kill all activity on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST); /* Wait for activity stop. 
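 * Poll PxCMD.CR below; 50000 iterations of DELAY(10) allow the 500ms the
 * AHCI spec gives the HBA to idle the command list engine after ST clears.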
*/ timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(ch->dev, "stopping AHCI engine failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR); ch->eslots = 0; } static void ahci_clo(struct ahci_channel *ch) { u_int32_t cmd; int timeout; /* Issue Command List Override if supported */ if (ch->caps & AHCI_CAP_SCLO) { cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); cmd |= AHCI_P_CMD_CLO; ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd); timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(ch->dev, "executing CLO failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO); } } static void ahci_stop_fr(struct ahci_channel *ch) { u_int32_t cmd; int timeout; /* Kill all FIS reception on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE); /* Wait for FIS reception stop. */ timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(ch->dev, "stopping AHCI FR engine failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR); } static void ahci_start_fr(struct ahci_channel *ch) { u_int32_t cmd; /* Start FIS reception on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_FRE); } static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0) { int timeout = 0; uint32_t val; while ((val = ATA_INL(ch->r_mem, AHCI_P_TFD)) & (ATA_S_BUSY | ATA_S_DRQ)) { if (timeout > t) { if (t != 0) { device_printf(ch->dev, "AHCI reset: device not ready after %dms " "(tfd = %08x)\n", MAX(t, 0) + t0, val); } return (EBUSY); } DELAY(1000); timeout++; } if (bootverbose) device_printf(ch->dev, "AHCI reset: device ready after %dms\n", timeout + t0); return (0); } static void ahci_reset_to(void *arg) { struct ahci_channel *ch = arg; if (ch->resetting == 0) return; ch->resetting--; if (ahci_wait_ready(ch, ch->resetting == 0 ? -1 : 0, (310 - ch->resetting) * 100) == 0) { ch->resetting = 0; ahci_start(ch, 1); xpt_release_simq(ch->sim, TRUE); return; } if (ch->resetting == 0) { ahci_clo(ch); ahci_start(ch, 1); xpt_release_simq(ch->sim, TRUE); return; } callout_schedule(&ch->reset_timer, hz / 10); } static void ahci_reset(struct ahci_channel *ch) { struct ahci_controller *ctlr = device_get_softc(device_get_parent(ch->dev)); int i; xpt_freeze_simq(ch->sim, 1); if (bootverbose) device_printf(ch->dev, "AHCI reset...\n"); /* Forget about previous reset. */ if (ch->resetting) { ch->resetting = 0; callout_stop(&ch->reset_timer); xpt_release_simq(ch->sim, TRUE); } /* Requeue frozen command. */ if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } ahci_done(ch, fccb); } /* Kill the engine and requeue all running commands. */ ahci_stop(ch); for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; /* XXX: Commands in loading state.
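 * Slots still in AHCI_SLOT_LOADING were skipped by the state check above
 * and are not completed here.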
*/ ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); } for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } if (ch->toslots != 0) xpt_release_simq(ch->sim, TRUE); ch->eslots = 0; ch->toslots = 0; ch->wrongccs = 0; ch->fatalerr = 0; /* Tell the XPT about the event */ xpt_async(AC_BUS_RESET, ch->path, NULL); /* Disable port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Reset and reconnect PHY. */ if (!ahci_sata_phy_reset(ch)) { if (bootverbose) device_printf(ch->dev, "AHCI reset: device not found\n"); ch->devices = 0; /* Enable wanted port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | AHCI_P_IX_PRC | AHCI_P_IX_PC)); xpt_release_simq(ch->sim, TRUE); return; } if (bootverbose) device_printf(ch->dev, "AHCI reset: device found\n"); /* Wait for clearing busy status. */ if (ahci_wait_ready(ch, dumping ? 31000 : 0, 0)) { if (dumping) ahci_clo(ch); else ch->resetting = 310; } ch->devices = 1; /* Enable wanted port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | AHCI_P_IX_TFE | AHCI_P_IX_HBF | AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF | ((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC | AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) | AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR))); if (ch->resetting) callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, ch); else { ahci_start(ch, 1); xpt_release_simq(ch->sim, TRUE); } } static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag) { u_int8_t *fis = &ctp->cfis[0]; bzero(fis, 20); fis[0] = 0x27; /* host to device */ fis[1] = (ccb->ccb_h.target_id & 0x0f); if (ccb->ccb_h.func_code == XPT_SCSI_IO) { fis[1] |= 0x80; fis[2] = ATA_PACKET_CMD; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) fis[3] = ATA_F_DMA; else { fis[5] = ccb->csio.dxfer_len; fis[6] = ccb->csio.dxfer_len >> 8; } fis[7] = ATA_D_LBA; fis[15] = ATA_A_4BIT; bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, ctp->acmd, ccb->csio.cdb_len); bzero(ctp->acmd + ccb->csio.cdb_len, 32 - ccb->csio.cdb_len); } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) { fis[1] |= 0x80; fis[2] = ccb->ataio.cmd.command; fis[3] = ccb->ataio.cmd.features; fis[4] = ccb->ataio.cmd.lba_low; fis[5] = ccb->ataio.cmd.lba_mid; fis[6] = ccb->ataio.cmd.lba_high; fis[7] = ccb->ataio.cmd.device; fis[8] = ccb->ataio.cmd.lba_low_exp; fis[9] = ccb->ataio.cmd.lba_mid_exp; fis[10] = ccb->ataio.cmd.lba_high_exp; fis[11] = ccb->ataio.cmd.features_exp; fis[12] = ccb->ataio.cmd.sector_count; if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { fis[12] &= 0x07; fis[12] |= tag << 3; } fis[13] = ccb->ataio.cmd.sector_count_exp; if (ccb->ataio.ata_flags & ATA_FLAG_ICC) fis[14] = ccb->ataio.icc; fis[15] = ATA_A_4BIT; if (ccb->ataio.ata_flags & ATA_FLAG_AUX) { fis[16] = ccb->ataio.aux & 0xff; fis[17] = (ccb->ataio.aux >> 8) & 0xff; fis[18] = (ccb->ataio.aux >> 16) & 0xff; fis[19] = (ccb->ataio.aux >> 24) & 0xff; } } else { fis[15] = ccb->ataio.cmd.control; } return (20); } static int ahci_sata_connect(struct ahci_channel *ch) { u_int32_t status; int timeout, found = 0; /* Wait up to 100ms for "connect well" */ for (timeout = 0; timeout < 1000 ; timeout++) { status = ATA_INL(ch->r_mem, AHCI_P_SSTS); if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) found = 1; if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) break; if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) { if (bootverbose) { device_printf(ch->dev, "SATA offline status=%08x\n", status); } return (0); } if (found == 0 && timeout >= 100) break; DELAY(100); } if (timeout >= 1000 || !found) { if (bootverbose) { device_printf(ch->dev, "SATA connect timeout time=%dus status=%08x\n", timeout * 100, status); } return (0); } if (bootverbose) { device_printf(ch->dev, "SATA connect time=%dus status=%08x\n", timeout * 100, status); } /* Clear SATA error register */ ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff); return (1); } static int ahci_sata_phy_reset(struct ahci_channel *ch) { int sata_rev; uint32_t val, detval; if (ch->listening) { val = ATA_INL(ch->r_mem, AHCI_P_CMD); val |= AHCI_P_CMD_SUD; ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); ch->listening = 0; } sata_rev = ch->user[ch->pm_present ? 15 : 0].revision; if (sata_rev == 1) val = ATA_SC_SPD_SPEED_GEN1; else if (sata_rev == 2) val = ATA_SC_SPD_SPEED_GEN2; else if (sata_rev == 3) val = ATA_SC_SPD_SPEED_GEN3; else val = 0; detval = ahci_ch_detval(ch, ATA_SC_DET_RESET); ATA_OUTL(ch->r_mem, AHCI_P_SCTL, detval | val | ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER); DELAY(1000); detval = ahci_ch_detval(ch, ATA_SC_DET_IDLE); ATA_OUTL(ch->r_mem, AHCI_P_SCTL, detval | val | ((ch->pm_level > 0) ? 0 : (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER))); if (!ahci_sata_connect(ch)) { if (ch->caps & AHCI_CAP_SSS) { val = ATA_INL(ch->r_mem, AHCI_P_CMD); val &= ~AHCI_P_CMD_SUD; ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); ch->listening = 1; } else if (ch->pm_level > 0) ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); return (0); } return (1); } static int ahci_check_ids(struct ahci_channel *ch, union ccb *ccb) { if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 
15 : 0)) { ccb->ccb_h.status = CAM_TID_INVALID; ahci_done(ch, ccb); return (-1); } if (ccb->ccb_h.target_lun != 0) { ccb->ccb_h.status = CAM_LUN_INVALID; ahci_done(ch, ccb); return (-1); } return (0); } static void ahciaction(struct cam_sim *sim, union ccb *ccb) { struct ahci_channel *ch; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n", ccb->ccb_h.func_code)); ch = (struct ahci_channel *)cam_sim_softc(sim); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_ATA_IO: /* Execute the requested I/O operation */ case XPT_SCSI_IO: if (ahci_check_ids(ch, ccb)) return; if (ch->devices == 0 || (ch->pm_present == 0 && ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; } ccb->ccb_h.recovery_type = RECOVERY_NONE; /* Check for command collision. */ if (ahci_check_collision(ch, ccb)) { /* Freeze command. */ ch->frozen = ccb; /* We have only one frozen slot, so freeze simq also. */ xpt_freeze_simq(ch->sim, 1); return; } ahci_begin_transaction(ch, ccb); return; case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ahci_device *d; if (ahci_check_ids(ch, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) d->revision = cts->xport_specific.sata.revision; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) d->mode = cts->xport_specific.sata.mode; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) d->bytecount = min(8192, cts->xport_specific.sata.bytecount); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) d->tags = min(ch->numslots, cts->xport_specific.sata.tags); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM) ch->pm_present = cts->xport_specific.sata.pm_present; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) d->atapi = cts->xport_specific.sata.atapi; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) d->caps = cts->xport_specific.sata.caps; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct ahci_device *d; uint32_t status; if (ahci_check_ids(ch, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; cts->protocol = PROTO_UNSPECIFIED; cts->protocol_version = PROTO_VERSION_UNSPECIFIED; cts->transport = XPORT_SATA; cts->transport_version = XPORT_VERSION_UNSPECIFIED; cts->proto_specific.valid = 0; cts->xport_specific.sata.valid = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (ccb->ccb_h.target_id == 15 || (ccb->ccb_h.target_id == 0 && !ch->pm_present))) { status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK; if (status & 0x0f0) { cts->xport_specific.sata.revision = (status & 0x0f0) >> 4; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; } cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; if (ch->pm_level) { if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC)) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; if (ch->caps2 & AHCI_CAP2_APST) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST; } if ((ch->caps & AHCI_CAP_SNCQ) && (ch->quirks & AHCI_Q_NOAA) == 0) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA; cts->xport_specific.sata.caps |= 
CTS_SATA_CAPS_H_AN; cts->xport_specific.sata.caps &= ch->user[ccb->ccb_h.target_id].caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } else { cts->xport_specific.sata.revision = d->revision; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; cts->xport_specific.sata.caps = d->caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } cts->xport_specific.sata.mode = d->mode; cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; cts->xport_specific.sata.bytecount = d->bytecount; cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; cts->xport_specific.sata.pm_present = ch->pm_present; cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM; cts->xport_specific.sata.tags = d->tags; cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS; cts->xport_specific.sata.atapi = d->atapi; cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ ahci_reset(ch); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; if (ch->caps & AHCI_CAP_SNCQ) cpi->hba_inquiry |= PI_TAG_ABLE; if (ch->caps & AHCI_CAP_SPM) cpi->hba_inquiry |= PI_SATAPM; cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED; if ((ch->quirks & AHCI_Q_NOAUX) == 0) cpi->hba_misc |= PIM_ATA_EXT; cpi->hba_eng_cnt = 0; if (ch->caps & AHCI_CAP_SPM) cpi->max_target = 15; else cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "AHCI", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SATA; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->maxio = ctob(AHCI_SG_ENTRIES - 1); /* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */ if (ch->quirks & AHCI_Q_MAXIO_64K) cpi->maxio = min(cpi->maxio, 128 * 512); cpi->hba_vendor = ch->vendorid; cpi->hba_device = ch->deviceid; cpi->hba_subvendor = ch->subvendorid; cpi->hba_subdevice = ch->subdeviceid; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } ahci_done(ch, ccb); } static void ahcipoll(struct cam_sim *sim) { struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim); uint32_t istatus; /* Read interrupt statuses and process if any. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); if (istatus != 0) ahci_ch_intr_main(ch, istatus); if (ch->resetting != 0 && (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) { ch->resetpolldiv = 1000; ahci_reset_to(ch); } } devclass_t ahci_devclass; MODULE_VERSION(ahci, 1); MODULE_DEPEND(ahci, cam, 1, 1, 1); diff --git a/sys/dev/ahci/ahci.h b/sys/dev/ahci/ahci.h index 472f9845117d..f1d5b18806b3 100644 --- a/sys/dev/ahci/ahci.h +++ b/sys/dev/ahci/ahci.h @@ -1,679 +1,678 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1998 - 2008 Søren Schmidt * Copyright (c) 2009-2012 Alexander Motin * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* ATA register defines */ #define ATA_DATA 0 /* (RW) data */ #define ATA_FEATURE 1 /* (W) feature */ #define ATA_F_DMA 0x01 /* enable DMA */ #define ATA_F_OVL 0x02 /* enable overlap */ #define ATA_COUNT 2 /* (W) sector count */ #define ATA_SECTOR 3 /* (RW) sector # */ #define ATA_CYL_LSB 4 /* (RW) cylinder# LSB */ #define ATA_CYL_MSB 5 /* (RW) cylinder# MSB */ #define ATA_DRIVE 6 /* (W) Sector/Drive/Head */ #define ATA_D_LBA 0x40 /* use LBA addressing */ #define ATA_D_IBM 0xa0 /* 512 byte sectors, ECC */ #define ATA_COMMAND 7 /* (W) command */ #define ATA_ERROR 8 /* (R) error */ #define ATA_E_ILI 0x01 /* illegal length */ #define ATA_E_NM 0x02 /* no media */ #define ATA_E_ABORT 0x04 /* command aborted */ #define ATA_E_MCR 0x08 /* media change request */ #define ATA_E_IDNF 0x10 /* ID not found */ #define ATA_E_MC 0x20 /* media changed */ #define ATA_E_UNC 0x40 /* uncorrectable data */ #define ATA_E_ICRC 0x80 /* UDMA crc error */ #define ATA_E_ATAPI_SENSE_MASK 0xf0 /* ATAPI sense key mask */ #define ATA_IREASON 9 /* (R) interrupt reason */ #define ATA_I_CMD 0x01 /* cmd (1) | data (0) */ #define ATA_I_IN 0x02 /* read (1) | write (0) */ #define ATA_I_RELEASE 0x04 /* released bus (1) */ #define ATA_I_TAGMASK 0xf8 /* tag mask */ #define ATA_STATUS 10 /* (R) status */ #define ATA_ALTSTAT 11 /* (R) alternate status */ #define ATA_S_ERROR 0x01 /* error */ #define ATA_S_INDEX 0x02 /* index */ #define ATA_S_CORR 0x04 /* data corrected */ #define ATA_S_DRQ 0x08 /* data request */ #define ATA_S_DSC 0x10 /* drive seek completed */ #define ATA_S_SERVICE 0x10 /* drive needs service */ #define ATA_S_DWF 0x20 /* drive write fault */ #define ATA_S_DMA 0x20 /* DMA ready */ #define ATA_S_READY 0x40 /* drive ready */ #define ATA_S_BUSY 0x80 /* busy */ #define ATA_CONTROL 12 /* (W) control */ #define ATA_A_IDS 0x02 /* disable interrupts */ #define ATA_A_RESET 0x04 /* RESET controller */ #define ATA_A_4BIT 0x08 /* 4 head bits */ #define ATA_A_HOB 0x80 /* High Order Byte enable */ /* SATA register defines */ #define ATA_SSTATUS 13 #define ATA_SS_DET_MASK 0x0000000f #define ATA_SS_DET_NO_DEVICE 0x00000000 #define ATA_SS_DET_DEV_PRESENT 0x00000001 #define ATA_SS_DET_PHY_ONLINE 0x00000003 #define ATA_SS_DET_PHY_OFFLINE 0x00000004 #define 
ATA_SS_SPD_MASK 0x000000f0 #define ATA_SS_SPD_NO_SPEED 0x00000000 #define ATA_SS_SPD_GEN1 0x00000010 #define ATA_SS_SPD_GEN2 0x00000020 #define ATA_SS_SPD_GEN3 0x00000030 #define ATA_SS_IPM_MASK 0x00000f00 #define ATA_SS_IPM_NO_DEVICE 0x00000000 #define ATA_SS_IPM_ACTIVE 0x00000100 #define ATA_SS_IPM_PARTIAL 0x00000200 #define ATA_SS_IPM_SLUMBER 0x00000600 #define ATA_SS_IPM_DEVSLEEP 0x00000800 #define ATA_SERROR 14 #define ATA_SE_DATA_CORRECTED 0x00000001 #define ATA_SE_COMM_CORRECTED 0x00000002 #define ATA_SE_DATA_ERR 0x00000100 #define ATA_SE_COMM_ERR 0x00000200 #define ATA_SE_PROT_ERR 0x00000400 #define ATA_SE_HOST_ERR 0x00000800 #define ATA_SE_PHY_CHANGED 0x00010000 #define ATA_SE_PHY_IERROR 0x00020000 #define ATA_SE_COMM_WAKE 0x00040000 #define ATA_SE_DECODE_ERR 0x00080000 #define ATA_SE_PARITY_ERR 0x00100000 #define ATA_SE_CRC_ERR 0x00200000 #define ATA_SE_HANDSHAKE_ERR 0x00400000 #define ATA_SE_LINKSEQ_ERR 0x00800000 #define ATA_SE_TRANSPORT_ERR 0x01000000 #define ATA_SE_UNKNOWN_FIS 0x02000000 #define ATA_SE_EXCHANGED 0x04000000 #define ATA_SCONTROL 15 #define ATA_SC_DET_MASK 0x0000000f #define ATA_SC_DET_IDLE 0x00000000 #define ATA_SC_DET_RESET 0x00000001 #define ATA_SC_DET_DISABLE 0x00000004 #define ATA_SC_SPD_MASK 0x000000f0 #define ATA_SC_SPD_NO_SPEED 0x00000000 #define ATA_SC_SPD_SPEED_GEN1 0x00000010 #define ATA_SC_SPD_SPEED_GEN2 0x00000020 #define ATA_SC_SPD_SPEED_GEN3 0x00000030 #define ATA_SC_IPM_MASK 0x00000f00 #define ATA_SC_IPM_NONE 0x00000000 #define ATA_SC_IPM_DIS_PARTIAL 0x00000100 #define ATA_SC_IPM_DIS_SLUMBER 0x00000200 #define ATA_SC_IPM_DIS_DEVSLEEP 0x00000400 #define ATA_SACTIVE 16 #define AHCI_MAX_PORTS 32 #define AHCI_MAX_SLOTS 32 #define AHCI_MAX_IRQS 16 /* SATA AHCI v1.0 register defines */ #define AHCI_CAP 0x00 #define AHCI_CAP_NPMASK 0x0000001f #define AHCI_CAP_SXS 0x00000020 #define AHCI_CAP_EMS 0x00000040 #define AHCI_CAP_CCCS 0x00000080 #define AHCI_CAP_NCS 0x00001F00 #define AHCI_CAP_NCS_SHIFT 8 #define AHCI_CAP_PSC 0x00002000 #define AHCI_CAP_SSC 0x00004000 #define AHCI_CAP_PMD 0x00008000 #define AHCI_CAP_FBSS 0x00010000 #define AHCI_CAP_SPM 0x00020000 #define AHCI_CAP_SAM 0x00080000 #define AHCI_CAP_ISS 0x00F00000 #define AHCI_CAP_ISS_SHIFT 20 #define AHCI_CAP_SCLO 0x01000000 #define AHCI_CAP_SAL 0x02000000 #define AHCI_CAP_SALP 0x04000000 #define AHCI_CAP_SSS 0x08000000 #define AHCI_CAP_SMPS 0x10000000 #define AHCI_CAP_SSNTF 0x20000000 #define AHCI_CAP_SNCQ 0x40000000 #define AHCI_CAP_64BIT 0x80000000 #define AHCI_GHC 0x04 #define AHCI_GHC_AE 0x80000000 #define AHCI_GHC_MRSM 0x00000004 #define AHCI_GHC_IE 0x00000002 #define AHCI_GHC_HR 0x00000001 #define AHCI_IS 0x08 #define AHCI_PI 0x0c #define AHCI_VS 0x10 #define AHCI_CCCC 0x14 #define AHCI_CCCC_TV_MASK 0xffff0000 #define AHCI_CCCC_TV_SHIFT 16 #define AHCI_CCCC_CC_MASK 0x0000ff00 #define AHCI_CCCC_CC_SHIFT 8 #define AHCI_CCCC_INT_MASK 0x000000f8 #define AHCI_CCCC_INT_SHIFT 3 #define AHCI_CCCC_EN 0x00000001 #define AHCI_CCCP 0x18 #define AHCI_EM_LOC 0x1C #define AHCI_EM_CTL 0x20 #define AHCI_EM_MR 0x00000001 #define AHCI_EM_TM 0x00000100 #define AHCI_EM_RST 0x00000200 #define AHCI_EM_LED 0x00010000 #define AHCI_EM_SAFTE 0x00020000 #define AHCI_EM_SES2 0x00040000 #define AHCI_EM_SGPIO 0x00080000 #define AHCI_EM_SMB 0x01000000 #define AHCI_EM_XMT 0x02000000 #define AHCI_EM_ALHD 0x04000000 #define AHCI_EM_PM 0x08000000 #define AHCI_CAP2 0x24 #define AHCI_CAP2_BOH 0x00000001 #define AHCI_CAP2_NVMP 0x00000002 #define AHCI_CAP2_APST 0x00000004 #define AHCI_CAP2_SDS 0x00000008 #define AHCI_CAP2_SADM 
0x00000010 #define AHCI_CAP2_DESO 0x00000020 #define AHCI_BOHC 0x28 #define AHCI_BOHC_BOS 0x00000001 #define AHCI_BOHC_OOS 0x00000002 #define AHCI_BOHC_SOOE 0x00000004 #define AHCI_BOHC_OOC 0x00000008 #define AHCI_BOHC_BB 0x00000010 #define AHCI_VSCAP 0xa4 #define AHCI_OFFSET 0x100 #define AHCI_STEP 0x80 #define AHCI_P_CLB 0x00 #define AHCI_P_CLBU 0x04 #define AHCI_P_FB 0x08 #define AHCI_P_FBU 0x0c #define AHCI_P_IS 0x10 #define AHCI_P_IE 0x14 #define AHCI_P_IX_DHR 0x00000001 #define AHCI_P_IX_PS 0x00000002 #define AHCI_P_IX_DS 0x00000004 #define AHCI_P_IX_SDB 0x00000008 #define AHCI_P_IX_UF 0x00000010 #define AHCI_P_IX_DP 0x00000020 #define AHCI_P_IX_PC 0x00000040 #define AHCI_P_IX_MP 0x00000080 #define AHCI_P_IX_PRC 0x00400000 #define AHCI_P_IX_IPM 0x00800000 #define AHCI_P_IX_OF 0x01000000 #define AHCI_P_IX_INF 0x04000000 #define AHCI_P_IX_IF 0x08000000 #define AHCI_P_IX_HBD 0x10000000 #define AHCI_P_IX_HBF 0x20000000 #define AHCI_P_IX_TFE 0x40000000 #define AHCI_P_IX_CPD 0x80000000 #define AHCI_P_CMD 0x18 #define AHCI_P_CMD_ST 0x00000001 #define AHCI_P_CMD_SUD 0x00000002 #define AHCI_P_CMD_POD 0x00000004 #define AHCI_P_CMD_CLO 0x00000008 #define AHCI_P_CMD_FRE 0x00000010 #define AHCI_P_CMD_CCS_MASK 0x00001f00 #define AHCI_P_CMD_CCS_SHIFT 8 #define AHCI_P_CMD_ISS 0x00002000 #define AHCI_P_CMD_FR 0x00004000 #define AHCI_P_CMD_CR 0x00008000 #define AHCI_P_CMD_CPS 0x00010000 #define AHCI_P_CMD_PMA 0x00020000 #define AHCI_P_CMD_HPCP 0x00040000 #define AHCI_P_CMD_MPSP 0x00080000 #define AHCI_P_CMD_CPD 0x00100000 #define AHCI_P_CMD_ESP 0x00200000 #define AHCI_P_CMD_FBSCP 0x00400000 #define AHCI_P_CMD_APSTE 0x00800000 #define AHCI_P_CMD_ATAPI 0x01000000 #define AHCI_P_CMD_DLAE 0x02000000 #define AHCI_P_CMD_ALPE 0x04000000 #define AHCI_P_CMD_ASP 0x08000000 #define AHCI_P_CMD_ICC_MASK 0xf0000000 #define AHCI_P_CMD_NOOP 0x00000000 #define AHCI_P_CMD_ACTIVE 0x10000000 #define AHCI_P_CMD_PARTIAL 0x20000000 #define AHCI_P_CMD_SLUMBER 0x60000000 #define AHCI_P_CMD_DEVSLEEP 0x80000000 #define AHCI_P_TFD 0x20 #define AHCI_P_SIG 0x24 #define AHCI_P_SSTS 0x28 #define AHCI_P_SCTL 0x2c #define AHCI_P_SERR 0x30 #define AHCI_P_SACT 0x34 #define AHCI_P_CI 0x38 #define AHCI_P_SNTF 0x3C #define AHCI_P_FBS 0x40 #define AHCI_P_FBS_EN 0x00000001 #define AHCI_P_FBS_DEC 0x00000002 #define AHCI_P_FBS_SDE 0x00000004 #define AHCI_P_FBS_DEV 0x00000f00 #define AHCI_P_FBS_DEV_SHIFT 8 #define AHCI_P_FBS_ADO 0x0000f000 #define AHCI_P_FBS_ADO_SHIFT 12 #define AHCI_P_FBS_DWE 0x000f0000 #define AHCI_P_FBS_DWE_SHIFT 16 #define AHCI_P_DEVSLP 0x44 #define AHCI_P_DEVSLP_ADSE 0x00000001 #define AHCI_P_DEVSLP_DSP 0x00000002 #define AHCI_P_DEVSLP_DETO 0x000003fc #define AHCI_P_DEVSLP_DETO_SHIFT 2 #define AHCI_P_DEVSLP_MDAT 0x00007c00 #define AHCI_P_DEVSLP_MDAT_SHIFT 10 #define AHCI_P_DEVSLP_DITO 0x01ff8000 #define AHCI_P_DEVSLP_DITO_SHIFT 15 #define AHCI_P_DEVSLP_DM 0x0e000000 #define AHCI_P_DEVSLP_DM_SHIFT 25 /* Pessimistic prognosis on number of required S/G entries */ #define AHCI_SG_ENTRIES MIN(roundup(btoc(maxphys) + 1, 8), 65528) /* Command list. 32 commands. First, 1Kbyte aligned. */ #define AHCI_CL_OFFSET 0 #define AHCI_CL_SIZE 32 /* Command tables. Up to 32 commands, Each, 128byte aligned. */ #define AHCI_CT_OFFSET (AHCI_CL_OFFSET + AHCI_CL_SIZE * AHCI_MAX_SLOTS) #define AHCI_CT_SIZE (128 + AHCI_SG_ENTRIES * 16) /* Total main work area. */ #define AHCI_WORK_SIZE (AHCI_CT_OFFSET + AHCI_CT_SIZE * ch->numslots) /* ivars value fields */ #define AHCI_REMAPPED_UNIT (1 << 31) /* NVMe remapped device. 
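 * Such children are NVMe devices exposed through the AHCI BAR by Intel
 * RST-style remapping; see remap_offset/remap_size in struct
 * ahci_controller.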
*/ #define AHCI_EM_UNIT (1 << 30) /* Enclosure Mgmt device. */ #define AHCI_UNIT 0xff /* Channel number. */ struct ahci_dma_prd { u_int64_t dba; u_int32_t reserved; u_int32_t dbc; /* 0 based */ #define AHCI_PRD_MASK 0x003fffff /* max 4MB */ #define AHCI_PRD_MAX (AHCI_PRD_MASK + 1) #define AHCI_PRD_IPC (1U << 31) } __packed; struct ahci_cmd_tab { u_int8_t cfis[64]; u_int8_t acmd[32]; u_int8_t reserved[32]; struct ahci_dma_prd prd_tab[]; } __packed; struct ahci_cmd_list { u_int16_t cmd_flags; #define AHCI_CMD_ATAPI 0x0020 #define AHCI_CMD_WRITE 0x0040 #define AHCI_CMD_PREFETCH 0x0080 #define AHCI_CMD_RESET 0x0100 #define AHCI_CMD_BIST 0x0200 #define AHCI_CMD_CLR_BUSY 0x0400 u_int16_t prd_length; /* PRD entries */ u_int32_t bytecount; u_int64_t cmd_table_phys; /* 128byte aligned */ } __packed; /* misc defines */ #define ATA_IRQ_RID 0 #define ATA_INTR_FLAGS (INTR_MPSAFE|INTR_TYPE_BIO|INTR_ENTROPY) struct ata_dmaslot { bus_dmamap_t data_map; /* data DMA map */ int nsegs; /* Number of segs loaded */ }; /* structure holding DMA related information */ struct ata_dma { bus_dma_tag_t work_tag; /* workspace DMA tag */ bus_dmamap_t work_map; /* workspace DMA map */ uint8_t *work; /* workspace */ bus_addr_t work_bus; /* bus address of work */ bus_dma_tag_t rfis_tag; /* RFIS list DMA tag */ bus_dmamap_t rfis_map; /* RFIS list DMA map */ uint8_t *rfis; /* FIS receive area */ bus_addr_t rfis_bus; /* bus address of rfis */ bus_dma_tag_t data_tag; /* data DMA tag */ }; enum ahci_slot_states { AHCI_SLOT_EMPTY, AHCI_SLOT_LOADING, AHCI_SLOT_RUNNING, AHCI_SLOT_EXECUTING }; struct ahci_slot { struct ahci_channel *ch; /* Channel */ u_int8_t slot; /* Number of this slot */ enum ahci_slot_states state; /* Slot state */ u_int ct_offset; /* cmd_tab offset */ union ccb *ccb; /* CCB occupying slot */ struct ata_dmaslot dma; /* DMA data of this slot */ struct callout timeout; /* Execution timeout */ }; struct ahci_device { int revision; int mode; u_int bytecount; u_int atapi; u_int tags; u_int caps; }; struct ahci_led { device_t dev; /* Device handle */ struct cdev *led; uint8_t num; /* Number of this led */ uint8_t state; /* State of this led */ }; #define AHCI_NUM_LEDS 3 /* structure describing an ATA channel */ struct ahci_channel { device_t dev; /* Device handle */ int unit; /* Physical channel */ struct resource *r_mem; /* Memory of this channel */ struct resource *r_irq; /* Interrupt of this channel */ void *ih; /* Interrupt handle */ struct ata_dma dma; /* DMA data */ struct cam_sim *sim; struct cam_path *path; uint32_t caps; /* Controller capabilities */ uint32_t caps2; /* Controller capabilities */ uint32_t chcaps; /* Channel capabilities */ uint32_t chscaps; /* Channel sleep capabilities */ uint16_t vendorid; /* Vendor ID from the bus */ uint16_t deviceid; /* Device ID from the bus */ uint16_t subvendorid; /* Subvendor ID from the bus */ uint16_t subdeviceid; /* Subdevice ID from the bus */ int quirks; int numslots; /* Number of present slots */ int pm_level; /* power management level */ int devices; /* What is present */ int pm_present; /* PM presence reported */ int fbs_enabled; /* FIS-based switching enabled */ void (*start)(struct ahci_channel *); union ccb *hold[AHCI_MAX_SLOTS]; struct ahci_slot slot[AHCI_MAX_SLOTS]; uint32_t oslots; /* Occupied slots */ uint32_t rslots; /* Running slots */ uint32_t aslots; /* Slots with atomic commands */ uint32_t eslots; /* Slots in error */ uint32_t toslots; /* Slots in timeout */ int lastslot; /* Last used slot */ int taggedtarget; /* Last tagged target */ int 
numrslots;		/* Number of running slots */
	int numrslotspd[16];	/* Number of running slots per dev */
	int numtslots;		/* Number of tagged slots */
	int numtslotspd[16];	/* Number of tagged slots per dev */
	int numhslots;		/* Number of held slots */
	int recoverycmd;	/* Our READ LOG active */
	int fatalerr;		/* Fatal error happened */
	int resetting;		/* Hard-reset in progress. */
	int resetpolldiv;	/* Hard-reset poll divider. */
	int listening;		/* SUD bit is cleared. */
	int wrongccs;		/* CCS field in CMD was wrong */
	union ccb *frozen;	/* Frozen command */
	struct callout pm_timer;	/* Power management events */
	struct callout reset_timer;	/* Hard-reset timeout */
	struct ahci_device user[16];	/* User-specified settings */
	struct ahci_device curr[16];	/* Current settings */
	struct mtx_padalign mtx;	/* state lock */
	STAILQ_HEAD(, ccb_hdr) doneq;	/* queue of completed CCBs */
	int batch;		/* doneq is in use */
	int disablephy;		/* keep PHY disabled */
};

struct ahci_enclosure {
	device_t dev;		/* Device handle */
	struct resource *r_memc;	/* Control register */
	struct resource *r_memt;	/* Transmit buffer */
	struct resource *r_memr;	/* Receive buffer */
	struct cam_sim *sim;
	struct cam_path *path;
	struct mtx mtx;		/* state lock */
	struct ahci_led leds[AHCI_MAX_PORTS * 3];
	uint32_t capsem;	/* Controller capabilities */
	uint8_t status[AHCI_MAX_PORTS][4];	/* ArrayDev statuses */
	int quirks;
	int channels;
	uint32_t ichannels;
};

/* structure describing an AHCI controller */
struct ahci_controller {
	device_t dev;
	bus_dma_tag_t dma_tag;
	int r_rid;
	int r_msix_tab_rid;
	int r_msix_pba_rid;
	uint16_t vendorid;	/* Vendor ID from the bus */
	uint16_t deviceid;	/* Device ID from the bus */
	uint16_t subvendorid;	/* Subvendor ID from the bus */
	uint16_t subdeviceid;	/* Subdevice ID from the bus */
	struct resource *r_mem;
	struct resource *r_msix_table;
	struct resource *r_msix_pba;
	struct rman sc_iomem;
	struct ahci_controller_irq {
		struct ahci_controller *ctlr;
		struct resource *r_irq;
		void *handle;
		int r_irq_rid;
		int mode;
#define	AHCI_IRQ_MODE_ALL	0
#define	AHCI_IRQ_MODE_AFTER	1
#define	AHCI_IRQ_MODE_ONE	2
	} irqs[AHCI_MAX_IRQS];
	uint32_t caps;		/* Controller capabilities */
	uint32_t caps2;		/* Controller capabilities */
	uint32_t capsem;	/* Controller capabilities */
	uint32_t emloc;		/* EM buffer location */
	int quirks;
	int numirqs;
	int channels;
	uint32_t ichannels;
	int ccc;		/* CCC timeout */
	int cccv;		/* CCC vector */
	int direct;		/* Direct command completion */
	int msi;		/* MSI interrupts */
	int remapped_devices;	/* Remapped NVMe devices */
	uint32_t remap_offset;
	uint32_t remap_size;
	struct {
		void (*function)(void *);
		void *argument;
	} interrupt[AHCI_MAX_PORTS];
	void (*ch_start)(struct ahci_channel *);
	int dma_coherent;	/* DMA is cache-coherent */
	struct mtx ch_mtx;	/* Lock for attached channels */
	struct ahci_channel *ch[AHCI_MAX_PORTS];	/* Attached channels */
};

enum ahci_err_type {
	AHCI_ERR_NONE,		/* No error */
	AHCI_ERR_INVALID,	/* Error detected by us before submitting. */
	AHCI_ERR_INNOCENT,	/* Innocent victim. */
	AHCI_ERR_TFE,		/* Task File Error. */
	AHCI_ERR_SATA,		/* SATA error. */
	AHCI_ERR_TIMEOUT,	/* Command execution timeout. */
	AHCI_ERR_NCQ,		/* NCQ command error.  CCB should be put on
				 * hold until READ LOG is executed to reveal
				 * the error. */
};
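/*
 * Illustrative sketch (not part of this change): the slot bitmasks in
 * struct ahci_channel above (oslots, rslots, ...) track the per-channel
 * AHCI command slots.  A minimal way to pick a free slot is an ffs() scan
 * of the inverted occupancy mask; the real allocator in ahci.c also has
 * to honor tags and atomic commands, so treat this as an example only.
 * It assumes <sys/systm.h> for ffs() and that ch->mtx is held.
 */
static inline int
ahci_slot_alloc_sketch(struct ahci_channel *ch)
{
	uint32_t free_slots;

	/* Bits 0..numslots-1 that are not yet occupied. */
	free_slots = ~ch->oslots & (0xffffffffu >> (32 - ch->numslots));
	if (free_slots == 0)
		return (-1);			/* all slots busy */
	return (ffs(free_slots) - 1);		/* lowest free slot index */
}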
/* macros to hide bus space ugliness */
#define	ATA_INB(res, offset) \
	bus_read_1((res), (offset))
#define	ATA_INW(res, offset) \
	bus_read_2((res), (offset))
#define	ATA_INL(res, offset) \
	bus_read_4((res), (offset))
#define	ATA_INSW(res, offset, addr, count) \
	bus_read_multi_2((res), (offset), (addr), (count))
#define	ATA_INSW_STRM(res, offset, addr, count) \
	bus_read_multi_stream_2((res), (offset), (addr), (count))
#define	ATA_INSL(res, offset, addr, count) \
	bus_read_multi_4((res), (offset), (addr), (count))
#define	ATA_INSL_STRM(res, offset, addr, count) \
	bus_read_multi_stream_4((res), (offset), (addr), (count))
#define	ATA_OUTB(res, offset, value) \
	bus_write_1((res), (offset), (value))
#define	ATA_OUTW(res, offset, value) \
	bus_write_2((res), (offset), (value))
#define	ATA_OUTL(res, offset, value) \
	bus_write_4((res), (offset), (value))
#define	ATA_OUTSW(res, offset, addr, count) \
	bus_write_multi_2((res), (offset), (addr), (count))
#define	ATA_OUTSW_STRM(res, offset, addr, count) \
	bus_write_multi_stream_2((res), (offset), (addr), (count))
#define	ATA_OUTSL(res, offset, addr, count) \
	bus_write_multi_4((res), (offset), (addr), (count))
#define	ATA_OUTSL_STRM(res, offset, addr, count) \
	bus_write_multi_stream_4((res), (offset), (addr), (count))

/*
 * On some platforms, we must ensure proper interdevice write ordering:
 * the AHCI interrupt status register must be updated in hardware before
 * the registers in the interrupt controller.  Unfortunately, the only way
 * we can guarantee that is by reading the register back.
 *
 * Currently, only ARM is known to have this issue.
 */
#if defined(__arm__)
#define	ATA_RBL(res, offset) \
	bus_read_4((res), (offset))
#else
#define	ATA_RBL(res, offset)
#endif

#define	AHCI_Q_NOFORCE		0x00000001
#define	AHCI_Q_NOPMP		0x00000002
#define	AHCI_Q_NONCQ		0x00000004
#define	AHCI_Q_1CH		0x00000008
#define	AHCI_Q_2CH		0x00000010
#define	AHCI_Q_4CH		0x00000020
#define	AHCI_Q_EDGEIS		0x00000040
#define	AHCI_Q_SATA2		0x00000080
#define	AHCI_Q_NOBSYRES		0x00000100
#define	AHCI_Q_NOAA		0x00000200
#define	AHCI_Q_NOCOUNT		0x00000400
#define	AHCI_Q_ALTSIG		0x00000800
#define	AHCI_Q_NOMSI		0x00001000
#define	AHCI_Q_ATI_PMP_BUG	0x00002000
#define	AHCI_Q_MAXIO_64K	0x00004000
#define	AHCI_Q_SATA1_UNIT0	0x00008000	/* need better method for this */
#define	AHCI_Q_ABAR0		0x00010000
#define	AHCI_Q_1MSI		0x00020000
#define	AHCI_Q_FORCE_PI		0x00040000
#define	AHCI_Q_RESTORE_CAP	0x00080000
#define	AHCI_Q_NOMSIX		0x00100000
#define	AHCI_Q_MRVL_SR_DEL	0x00200000
#define	AHCI_Q_NOCCS		0x00400000
#define	AHCI_Q_NOAUX		0x00800000
#define	AHCI_Q_IOMMU_BUSWIDE	0x01000000

#define	AHCI_Q_BIT_STRING \
	"\020" \
	"\001NOFORCE" \
	"\002NOPMP" \
	"\003NONCQ" \
	"\0041CH" \
	"\0052CH" \
	"\0064CH" \
	"\007EDGEIS" \
	"\010SATA2" \
	"\011NOBSYRES" \
	"\012NOAA" \
	"\013NOCOUNT" \
	"\014ALTSIG" \
	"\015NOMSI" \
	"\016ATI_PMP_BUG" \
	"\017MAXIO_64K" \
	"\020SATA1_UNIT0" \
	"\021ABAR0" \
	"\0221MSI" \
	"\023FORCE_PI" \
	"\024RESTORE_CAP" \
	"\025NOMSIX" \
	"\026MRVL_SR_DEL" \
	"\027NOCCS" \
	"\030NOAUX" \
	"\031IOMMU_BUSWIDE"

int ahci_attach(device_t dev);
int ahci_detach(device_t dev);
int ahci_setup_interrupt(device_t dev);
int ahci_print_child(device_t dev, device_t child);
struct resource *ahci_alloc_resource(device_t dev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags);
int ahci_release_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r);
int ahci_setup_intr(device_t dev, device_t child, struct resource *irq,
    int flags, driver_filter_t *filter, driver_intr_t *function,
    void *argument, void **cookiep);
int ahci_teardown_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie);
-int ahci_child_location_str(device_t dev, device_t child, char *buf,
-    size_t buflen);
+int ahci_child_location(device_t dev, device_t child, struct sbuf *sb);
bus_dma_tag_t ahci_get_dma_tag(device_t dev, device_t child);
int ahci_ctlr_reset(device_t dev);
int ahci_ctlr_setup(device_t dev);
void ahci_free_mem(device_t dev);
/* Functions to allow AHCI EM to access other channels. */
void ahci_attached(device_t dev, struct ahci_channel *ch);
void ahci_detached(device_t dev, struct ahci_channel *ch);
struct ahci_channel * ahci_getch(device_t dev, int n);
void ahci_putch(struct ahci_channel *ch);
extern devclass_t ahci_devclass;
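/*
 * Illustrative note (not part of this change): AHCI_Q_BIT_STRING above is
 * a printf(9) "%b" format description -- the leading "\020" selects base
 * 16 and each "\NNN" names the bit at that 1-based position.  A quirk
 * mask can be decoded for a boot message roughly like this:
 *
 *	device_printf(dev, "quirks=%b\n", ctlr->quirks, AHCI_Q_BIT_STRING);
 *
 * which prints the mask in hex followed by the names of the set bits,
 * e.g. "<NOPMP,NOMSI>".
 */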
diff --git a/sys/dev/ahci/ahci_fsl_fdt.c b/sys/dev/ahci/ahci_fsl_fdt.c
index f7491ee9768c..0fda9c28294b 100644
--- a/sys/dev/ahci/ahci_fsl_fdt.c
+++ b/sys/dev/ahci/ahci_fsl_fdt.c
@@ -1,424 +1,424 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2020 Alstom Group
 * Copyright (c) 2020 Semihalf
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* AHCI controller driver for NXP QorIQ Layerscape SoCs.
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define AHCI_FSL_REG_PHY1 0xa8 #define AHCI_FSL_REG_PHY2 0xac #define AHCI_FSL_REG_PHY3 0xb0 #define AHCI_FSL_REG_PHY4 0xb4 #define AHCI_FSL_REG_PHY5 0xb8 #define AHCI_FSL_REG_AXICC 0xbc #define AHCI_FSL_REG_PTC 0xc8 #define AHCI_FSL_LS1021A_AXICC 0xc0 #define AHCI_FSL_REG_PHY1_TTA_MASK 0x0001ffff #define AHCI_FSL_REG_PHY1_SNM (1 << 17) #define AHCI_FSL_REG_PHY1_SNR (1 << 18) #define AHCI_FSL_REG_PHY1_FPR (1 << 20) #define AHCI_FSL_REG_PHY1_PBPS_LBP 0 #define AHCI_FSL_REG_PHY1_PBPS_LFTP (0x01 << 21) #define AHCI_FSL_REG_PHY1_PBPS_MFTP (0x02 << 21) #define AHCI_FSL_REG_PHY1_PBPS_HFTP (0x03 << 21) #define AHCI_FSL_REG_PHY1_PBPS_PRBS (0x04 << 21) #define AHCI_FSL_REG_PHY1_PBPS_BIST (0x05 << 21) #define AHCI_FSL_REG_PHY1_PBPE (1 << 24) #define AHCI_FSL_REG_PHY1_PBCE (1 << 25) #define AHCI_FSL_REG_PHY1_PBPNA (1 << 26) #define AHCI_FSL_REG_PHY1_STB (1 << 27) #define AHCI_FSL_REG_PHY1_PSSO (1 << 28) #define AHCI_FSL_REG_PHY1_PSS (1 << 29) #define AHCI_FSL_REG_PHY1_ERSN (1 << 30) #define AHCI_FSL_REG_PHY1_ESDF (1 << 31) #define AHCI_FSL_REG_PHY_MASK 0xff #define AHCI_FSL_PHY2_CIBGMN_SHIFT 0 #define AHCI_FSL_PHY2_CIBGMX_SHIFT 8 #define AHCI_FSL_PHY2_CIBGN_SHIFT 16 #define AHCI_FSL_PHY2_CINMP_SHIFT 24 #define AHCI_FSL_PHY3_CWBGMN_SHIFT 0 #define AHCI_FSL_PHY3_CWBGMX_SHIFT 8 #define AHCI_FSL_PHY3_CWBGN_SHIFT 16 #define AHCI_FSL_PHY3_CWNMP_SHIFT 24 /* Only in LS1021A */ #define AHCI_FSL_PHY4_BMX_SHIFT 0 #define AHCI_FSL_PHY4_BNM_SHIFT 8 #define AHCI_FSL_PHY4_SFD_SHIFT 16 #define AHCI_FSL_PHY4_PTST_SHIFT 24 /* Only in LS1021A */ #define AHCI_FSL_PHY5_RIT_SHIFT 0 #define AHCI_FSL_PHY5_RCT_SHIFT 20 #define AHCI_FSL_REG_PTC_RXWM_MASK 0x0000007f #define AHCI_FSL_REG_PTC_ENBD (1 << 8) #define AHCI_FSL_REG_PTC_ITM (1 << 9) #define AHCI_FSL_REG_PHY1_CFG \ ((0x1fffe & AHCI_FSL_REG_PHY1_TTA_MASK) | \ AHCI_FSL_REG_PHY1_SNM | AHCI_FSL_REG_PHY1_PSS | AHCI_FSL_REG_PHY1_ESDF) #define AHCI_FSL_REG_PHY2_CFG \ ((0x1f << AHCI_FSL_PHY2_CIBGMN_SHIFT) | \ (0x4d << AHCI_FSL_PHY2_CIBGMX_SHIFT) | \ (0x18 << AHCI_FSL_PHY2_CIBGN_SHIFT) | \ (0x28 << AHCI_FSL_PHY2_CINMP_SHIFT)) #define AHCI_FSL_REG_PHY2_CFG_LS1021A \ ((0x14 << AHCI_FSL_PHY2_CIBGMN_SHIFT) | \ (0x34 << AHCI_FSL_PHY2_CIBGMX_SHIFT) | \ (0x18 << AHCI_FSL_PHY2_CIBGN_SHIFT) | \ (0x28 << AHCI_FSL_PHY2_CINMP_SHIFT)) #define AHCI_FSL_REG_PHY3_CFG \ ((0x09 << AHCI_FSL_PHY3_CWBGMN_SHIFT) | \ (0x15 << AHCI_FSL_PHY3_CWBGMX_SHIFT) | \ (0x08 << AHCI_FSL_PHY3_CWBGN_SHIFT) | \ (0x0e << AHCI_FSL_PHY3_CWNMP_SHIFT)) #define AHCI_FSL_REG_PHY3_CFG_LS1021A \ ((0x06 << AHCI_FSL_PHY3_CWBGMN_SHIFT) | \ (0x0e << AHCI_FSL_PHY3_CWBGMX_SHIFT) | \ (0x08 << AHCI_FSL_PHY3_CWBGN_SHIFT) | \ (0x0e << AHCI_FSL_PHY3_CWNMP_SHIFT)) #define AHCI_FSL_REG_PHY4_CFG_LS1021A \ ((0x0b << AHCI_FSL_PHY4_BMX_SHIFT) | \ (0x08 << AHCI_FSL_PHY4_BNM_SHIFT) | \ (0x4a << AHCI_FSL_PHY4_SFD_SHIFT) | \ (0x06 << AHCI_FSL_PHY4_PTST_SHIFT)) #define AHCI_FSL_REG_PHY5_CFG_LS1021A \ ((0x86470 << AHCI_FSL_PHY5_RIT_SHIFT) | \ (0x2aa << AHCI_FSL_PHY5_RCT_SHIFT)) /* Bit 27 enabled so value of reserved bits remains as in documentation. 
 */
#define	AHCI_FSL_REG_PTC_CFG \
	((0x29 & AHCI_FSL_REG_PTC_RXWM_MASK) | (1 << 27))

#define	AHCI_FSL_REG_AXICC_CFG	0x3fffffff

#define	AHCI_FSL_REG_ECC	0x0
#define	AHCI_FSL_REG_ECC_LS1021A	0x00020000
#define	AHCI_FSL_REG_ECC_LS1043A	0x80000000
#define	AHCI_FSL_REG_ECC_LS1028A	0x40000000

#define	QORIQ_AHCI_LS1021A	1
#define	QORIQ_AHCI_LS1028A	2
#define	QORIQ_AHCI_LS1043A	3
#define	QORIQ_AHCI_LS2080A	4
#define	QORIQ_AHCI_LS1046A	5
#define	QORIQ_AHCI_LS1088A	6
#define	QORIQ_AHCI_LS2088A	7
#define	QORIQ_AHCI_LX2160A	8

struct ahci_fsl_fdt_controller {
	struct ahci_controller ctlr;	/* Must be the first field. */
	int soc_type;
	struct resource *r_ecc;
	int r_ecc_rid;
};

static const struct ofw_compat_data ahci_fsl_fdt_compat_data[] = {
	{"fsl,ls1021a-ahci", QORIQ_AHCI_LS1021A},
	{"fsl,ls1028a-ahci", QORIQ_AHCI_LS1028A},
	{"fsl,ls1043a-ahci", QORIQ_AHCI_LS1043A},
	{"fsl,ls2080a-ahci", QORIQ_AHCI_LS2080A},
	{"fsl,ls1046a-ahci", QORIQ_AHCI_LS1046A},
	{"fsl,ls1088a-ahci", QORIQ_AHCI_LS1088A},
	{"fsl,ls2088a-ahci", QORIQ_AHCI_LS2088A},
	{"fsl,lx2160a-ahci", QORIQ_AHCI_LX2160A},
	{NULL, 0}
};

static bool ecc_inited;

static int
ahci_fsl_fdt_ecc_init(struct ahci_fsl_fdt_controller *ctrl)
{
	uint32_t val;

	switch (ctrl->soc_type) {
	case QORIQ_AHCI_LS2080A:
	case QORIQ_AHCI_LS2088A:
		return (0);
	case QORIQ_AHCI_LS1021A:
		if (!ecc_inited && ctrl->r_ecc == NULL)
			return (ENXIO);
		if (!ecc_inited)
			ATA_OUTL(ctrl->r_ecc, AHCI_FSL_REG_ECC,
			    AHCI_FSL_REG_ECC_LS1021A);
		break;
	case QORIQ_AHCI_LS1043A:
	case QORIQ_AHCI_LS1046A:
		if (!ecc_inited && ctrl->r_ecc == NULL)
			return (ENXIO);
		if (!ecc_inited) {
			val = ATA_INL(ctrl->r_ecc, AHCI_FSL_REG_ECC);
			val |= AHCI_FSL_REG_ECC_LS1043A;
			ATA_OUTL(ctrl->r_ecc, AHCI_FSL_REG_ECC, val);
		}
		break;
	case QORIQ_AHCI_LS1028A:
	case QORIQ_AHCI_LS1088A:
	case QORIQ_AHCI_LX2160A:
		if (!ecc_inited && ctrl->r_ecc == NULL)
			return (ENXIO);
		if (!ecc_inited) {
			val = ATA_INL(ctrl->r_ecc, AHCI_FSL_REG_ECC);
			val |= AHCI_FSL_REG_ECC_LS1028A;
			ATA_OUTL(ctrl->r_ecc, AHCI_FSL_REG_ECC, val);
		}
		break;
	default:
		panic("Unimplemented SOC type: %d", ctrl->soc_type);
	}

	ecc_inited = true;
	return (0);
}

static void
ahci_fsl_fdt_phy_init(struct ahci_fsl_fdt_controller *ctrl)
{
	struct ahci_controller *ahci;

	ahci = &ctrl->ctlr;
	if (ctrl->soc_type == QORIQ_AHCI_LS1021A) {
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY1,
		    AHCI_FSL_REG_PHY1_CFG);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY2,
		    AHCI_FSL_REG_PHY2_CFG_LS1021A);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY3,
		    AHCI_FSL_REG_PHY3_CFG_LS1021A);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY4,
		    AHCI_FSL_REG_PHY4_CFG_LS1021A);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY5,
		    AHCI_FSL_REG_PHY5_CFG_LS1021A);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PTC, AHCI_FSL_REG_PTC_CFG);

		if (ctrl->ctlr.dma_coherent)
			ATA_OUTL(ahci->r_mem, AHCI_FSL_LS1021A_AXICC,
			    AHCI_FSL_REG_AXICC_CFG);
	} else {
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY1,
		    AHCI_FSL_REG_PHY1_CFG);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY2, AHCI_FSL_REG_PHY2_CFG);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PHY3, AHCI_FSL_REG_PHY3_CFG);
		ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_PTC, AHCI_FSL_REG_PTC_CFG);

		if (ctrl->ctlr.dma_coherent)
			ATA_OUTL(ahci->r_mem, AHCI_FSL_REG_AXICC,
			    AHCI_FSL_REG_AXICC_CFG);
	}
}

static int
ahci_fsl_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (!ofw_bus_search_compatible(dev, ahci_fsl_fdt_compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "NXP QorIQ Layerscape AHCI controller");
	return (BUS_PROBE_DEFAULT);
}
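/*
 * Illustrative sketch (not part of this change): with the sbuf-based bus
 * methods this commit switches to, a bus emits its child's key=value
 * pairs straight into the caller-supplied sbuf instead of a fixed char
 * buffer.  This function and its field names are hypothetical, and it
 * assumes <sys/param.h>, <sys/bus.h>, and <sys/sbuf.h>; it only
 * demonstrates the shape of such a method.
 */
static int
example_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb)
{
	/* Hypothetical identifiers; a real bus reads them from its ivars. */
	uint16_t vendor = 0x1b4b, device = 0x9235;

	sbuf_printf(sb, "vendor=0x%04x device=0x%04x", vendor, device);
	return (0);
}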
static int
ahci_fsl_fdt_attach(device_t dev)
{
	struct ahci_fsl_fdt_controller *ctlr;
	struct ahci_controller *ahci;
	phandle_t node;
	clk_t clock;
	int ret;

	node = ofw_bus_get_node(dev);
	ctlr = device_get_softc(dev);
	ctlr->soc_type =
	    ofw_bus_search_compatible(dev, ahci_fsl_fdt_compat_data)->ocd_data;
	ahci = &ctlr->ctlr;
	ahci->dev = dev;
	ahci->r_rid = 0;
	ahci->quirks = AHCI_Q_NOPMP;
	ahci->dma_coherent = OF_hasprop(node, "dma-coherent");

	ret = clk_get_by_ofw_index(dev, node, 0, &clock);
	if (ret != 0) {
		device_printf(dev, "No clock found.\n");
		return (ENXIO);
	}

	ret = clk_enable(clock);
	if (ret != 0) {
		device_printf(dev, "Could not enable clock.\n");
		return (ENXIO);
	}

	if (OF_hasprop(node, "reg-names") && ofw_bus_find_string_index(node,
	    "reg-names", "ahci", &ahci->r_rid)) {
		device_printf(dev, "Could not locate 'ahci' string in the "
		    "'reg-names' property\n");
		return (ENOENT);
	}

	ahci->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &ahci->r_rid, RF_ACTIVE);
	if (!ahci->r_mem) {
		device_printf(dev,
		    "Could not allocate resources for controller\n");
		return (ENOMEM);
	}

	ret = ofw_bus_find_string_index(node, "reg-names", "sata-ecc",
	    &ctlr->r_ecc_rid);
	if (ret == 0) {
		ctlr->r_ecc = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &ctlr->r_ecc_rid, RF_ACTIVE | RF_SHAREABLE);
		if (!ctlr->r_ecc) {
			device_printf(dev,
			    "Could not allocate resources for controller\n");
			ret = ENOMEM;
			goto err_free_mem;
		}
	} else if (ret != ENOENT) {
		device_printf(dev, "Could not locate 'sata-ecc' string in "
		    "the 'reg-names' property\n");
		goto err_free_mem;
	}

	ret = ahci_fsl_fdt_ecc_init(ctlr);
	if (ret != 0) {
		device_printf(dev, "Could not initialize 'ecc' registers\n");
		goto err_free_mem;
	}

	/* Setup controller defaults. */
	ahci->numirqs = 1;
	ahci_fsl_fdt_phy_init(ctlr);

	/* Reset controller. */
	ret = ahci_ctlr_reset(dev);
	if (ret)
		goto err_free_mem;

	ret = ahci_attach(dev);
	if (ret) {
		device_printf(dev,
		    "Could not initialize AHCI, with error: %d\n", ret);
		goto err_free_ecc;
	}
	return (0);

err_free_mem:
	bus_free_resource(dev, SYS_RES_MEMORY, ahci->r_mem);
err_free_ecc:
	if (ctlr->r_ecc)
		bus_free_resource(dev, SYS_RES_MEMORY, ctlr->r_ecc);
	return (ret);
}

static int
ahci_fsl_fdt_detach(device_t dev)
{
	struct ahci_fsl_fdt_controller *ctlr;

	ctlr = device_get_softc(dev);
	if (ctlr->r_ecc)
		bus_free_resource(dev, SYS_RES_MEMORY, ctlr->r_ecc);
	return ahci_detach(dev);
}

static const device_method_t ahci_fsl_fdt_methods[] = {
	DEVMETHOD(device_probe, ahci_fsl_fdt_probe),
	DEVMETHOD(device_attach, ahci_fsl_fdt_attach),
	DEVMETHOD(device_detach, ahci_fsl_fdt_detach),
	DEVMETHOD(bus_alloc_resource, ahci_alloc_resource),
	DEVMETHOD(bus_release_resource, ahci_release_resource),
	DEVMETHOD(bus_setup_intr, ahci_setup_intr),
	DEVMETHOD(bus_teardown_intr, ahci_teardown_intr),
	DEVMETHOD(bus_print_child, ahci_print_child),
-	DEVMETHOD(bus_child_location_str, ahci_child_location_str),
+	DEVMETHOD(bus_child_location, ahci_child_location),
	DEVMETHOD(bus_get_dma_tag, ahci_get_dma_tag),
	DEVMETHOD_END
};

static driver_t ahci_fsl_fdt_driver = {
	"ahci",
	ahci_fsl_fdt_methods,
	sizeof(struct ahci_fsl_fdt_controller),
};

static devclass_t ahci_fsl_fdt_devclass;
DRIVER_MODULE(ahci_fsl, simplebus, ahci_fsl_fdt_driver,
    ahci_fsl_fdt_devclass, NULL, NULL);
DRIVER_MODULE(ahci_fsl, ofwbus, ahci_fsl_fdt_driver, ahci_fsl_fdt_devclass,
    NULL, NULL);
diff --git a/sys/dev/ahci/ahci_generic.c b/sys/dev/ahci/ahci_generic.c
index 79130e9e9e57..108aaa2adf3a 100644
--- a/sys/dev/ahci/ahci_generic.c
+++ b/sys/dev/ahci/ahci_generic.c
@@ -1,220 +1,220 @@
/*-
 * Copyright (c) 2009-2012 Alexander Motin
 * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_ACPI #include #include #include #include #endif #ifdef FDT #include #include static struct ofw_compat_data compat_data[] = { {"generic-ahci", 1}, {"snps,dwc-ahci", 1}, {"marvell,armada-3700-ahci", 1}, {NULL, 0} }; static int ahci_fdt_probe(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); phandle_t node; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc_copy(dev, "AHCI SATA controller"); node = ofw_bus_get_node(dev); ctlr->dma_coherent = OF_hasprop(node, "dma-coherent"); return (BUS_PROBE_DEFAULT); } #endif #ifdef DEV_ACPI static const struct ahci_acpi_quirk { const char* hid; u_int quirks; } ahci_acpi_quirks[] = { { "NXP0004", AHCI_Q_NOPMP }, { NULL, 0 } }; static int ahci_acpi_probe(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); ACPI_HANDLE h; int i; if ((h = acpi_get_handle(dev)) == NULL) return (ENXIO); if (pci_get_class(dev) == PCIC_STORAGE && pci_get_subclass(dev) == PCIS_STORAGE_SATA && pci_get_progif(dev) == PCIP_STORAGE_SATA_AHCI_1_0) { device_set_desc_copy(dev, "AHCI SATA controller"); if (ACPI_FAILURE(acpi_GetInteger(h, "_CCA", &ctlr->dma_coherent))) ctlr->dma_coherent = 0; if (bootverbose) device_printf(dev, "Bus is%s cache-coherent\n", ctlr->dma_coherent ? "" : " not"); for (i = 0; ahci_acpi_quirks[i].hid != NULL; i++) { if (acpi_MatchHid(h, ahci_acpi_quirks[i].hid) != ACPI_MATCHHID_NOMATCH) { ctlr->quirks = ahci_acpi_quirks[i].quirks; break; } } return (BUS_PROBE_DEFAULT); } return (ENXIO); } #endif static int ahci_gen_ctlr_reset(device_t dev) { return ahci_ctlr_reset(dev); } static int ahci_gen_attach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int error; ctlr->r_rid = 0; ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE); if (ctlr->r_mem == NULL) return (ENXIO); /* Setup controller defaults. 
*/ ctlr->numirqs = 1; /* Reset controller */ if ((error = ahci_gen_ctlr_reset(dev)) == 0) error = ahci_attach(dev); if (error != 0) { if (ctlr->r_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); } return error; } static int ahci_gen_detach(device_t dev) { ahci_detach(dev); return (0); } #ifdef FDT static devclass_t ahci_gen_fdt_devclass; static device_method_t ahci_fdt_methods[] = { DEVMETHOD(device_probe, ahci_fdt_probe), DEVMETHOD(device_attach, ahci_gen_attach), DEVMETHOD(device_detach, ahci_gen_detach), DEVMETHOD(bus_print_child, ahci_print_child), DEVMETHOD(bus_alloc_resource, ahci_alloc_resource), DEVMETHOD(bus_release_resource, ahci_release_resource), DEVMETHOD(bus_setup_intr, ahci_setup_intr), DEVMETHOD(bus_teardown_intr,ahci_teardown_intr), - DEVMETHOD(bus_child_location_str, ahci_child_location_str), + DEVMETHOD(bus_child_location, ahci_child_location), DEVMETHOD(bus_get_dma_tag, ahci_get_dma_tag), DEVMETHOD_END }; static driver_t ahci_fdt_driver = { "ahci", ahci_fdt_methods, sizeof(struct ahci_controller) }; DRIVER_MODULE(ahci_fdt, simplebus, ahci_fdt_driver, ahci_gen_fdt_devclass, NULL, NULL); #endif #ifdef DEV_ACPI static devclass_t ahci_gen_acpi_devclass; static device_method_t ahci_acpi_methods[] = { DEVMETHOD(device_probe, ahci_acpi_probe), DEVMETHOD(device_attach, ahci_gen_attach), DEVMETHOD(device_detach, ahci_gen_detach), DEVMETHOD(bus_print_child, ahci_print_child), DEVMETHOD(bus_alloc_resource, ahci_alloc_resource), DEVMETHOD(bus_release_resource, ahci_release_resource), DEVMETHOD(bus_setup_intr, ahci_setup_intr), DEVMETHOD(bus_teardown_intr,ahci_teardown_intr), - DEVMETHOD(bus_child_location_str, ahci_child_location_str), + DEVMETHOD(bus_child_location, ahci_child_location), DEVMETHOD(bus_get_dma_tag, ahci_get_dma_tag), DEVMETHOD_END }; static driver_t ahci_acpi_driver = { "ahci", ahci_acpi_methods, sizeof(struct ahci_controller) }; DRIVER_MODULE(ahci_acpi, acpi, ahci_acpi_driver, ahci_gen_acpi_devclass, NULL, NULL); #endif diff --git a/sys/dev/ahci/ahci_mv_fdt.c b/sys/dev/ahci/ahci_mv_fdt.c index b88ff2713587..ee08093482c5 100644 --- a/sys/dev/ahci/ahci_mv_fdt.c +++ b/sys/dev/ahci/ahci_mv_fdt.c @@ -1,155 +1,155 @@ /* * Copyright (c) 2017 Semihalf. * Copyright (c) 2017 Stormshield. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */

#include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include

#define	AHCI_VENDOR_SPECIFIC_0_ADDR	0xa0
#define	AHCI_VENDOR_SPECIFIC_0_DATA	0xa4

#define	AHCI_HC_DEVSTR	"Marvell AHCI Controller"
#define	AHCI_HC_VENDOR	"Marvell"

static device_attach_t ahci_mv_fdt_attach;

static struct ofw_compat_data compatible_data[] = {
	{"marvell,armada-380-ahci",	true},
	{NULL,				false}
};

static void
ahci_mv_regret_config(struct ahci_controller *ctlr)
{
	/*
	 * Enable the regret bit to allow the SATA unit to regret
	 * a request that didn't receive an acknowledge
	 * and avoid a deadlock.
	 */
	ATA_OUTL(ctlr->r_mem, AHCI_VENDOR_SPECIFIC_0_ADDR, 0x4);
	ATA_OUTL(ctlr->r_mem, AHCI_VENDOR_SPECIFIC_0_DATA, 0x80);
}

static int
ahci_mv_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev, compatible_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, AHCI_HC_DEVSTR);
	return (BUS_PROBE_DEFAULT);
}

static int
ahci_mv_fdt_attach(device_t dev)
{
	struct ahci_controller *ctlr;
	int rc;

	ctlr = device_get_softc(dev);
	ctlr->dev = dev;
	ctlr->r_rid = 0;
	ctlr->quirks = AHCI_Q_2CH;
	ctlr->numirqs = 1;

	if (ofw_bus_is_compatible(dev, "marvell,armada-380-ahci"))
		ctlr->quirks |= AHCI_Q_MRVL_SR_DEL;

	/* Allocate memory for controller */
	ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (ctlr->r_mem == NULL) {
		device_printf(dev, "Failed to alloc memory for controller\n");
		return (ENOMEM);
	}

	/* Reset controller */
	rc = ahci_ctlr_reset(dev);
	if (rc != 0) {
		device_printf(dev, "Failed to reset controller\n");
		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid,
		    ctlr->r_mem);
		return (ENXIO);
	}

	ahci_mv_regret_config(ctlr);

	rc = ahci_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "Failed to initialize AHCI, with error %d\n", rc);
		return (ENXIO);
	}

	return (0);
}

static device_method_t ahci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ahci_mv_fdt_probe),
	DEVMETHOD(device_attach, ahci_mv_fdt_attach),
	DEVMETHOD(device_detach, ahci_detach),
	DEVMETHOD(bus_alloc_resource, ahci_alloc_resource),
	DEVMETHOD(bus_release_resource, ahci_release_resource),
	DEVMETHOD(bus_setup_intr, ahci_setup_intr),
	DEVMETHOD(bus_teardown_intr, ahci_teardown_intr),
	DEVMETHOD(bus_print_child, ahci_print_child),
-	DEVMETHOD(bus_child_location_str, ahci_child_location_str),
+	DEVMETHOD(bus_child_location, ahci_child_location),
	DEVMETHOD(bus_get_dma_tag, ahci_get_dma_tag),
	DEVMETHOD_END
};

static driver_t ahci_driver = {
	"ahci",
	ahci_methods,
	sizeof(struct ahci_controller)
};

DRIVER_MODULE(ahci_mv, simplebus, ahci_driver, ahci_devclass, NULL, NULL);
DRIVER_MODULE(ahci_mv, ofwbus, ahci_driver, ahci_devclass, NULL, NULL);
diff --git a/sys/dev/ahci/ahci_pci.c b/sys/dev/ahci/ahci_pci.c
index ccc107a14bc8..b60119f3ab64 100644
--- a/sys/dev/ahci/ahci_pci.c
+++ b/sys/dev/ahci/ahci_pci.c
@@ -1,775 +1,775 @@
/*-
 * Copyright (c) 2009-2012 Alexander Motin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ahci.h" static int force_ahci = 1; TUNABLE_INT("hw.ahci.force", &force_ahci); static const struct { uint32_t id; uint8_t rev; const char *name; int quirks; } ahci_ids[] = { {0x43801002, 0x00, "AMD SB600", AHCI_Q_NOMSI | AHCI_Q_ATI_PMP_BUG | AHCI_Q_MAXIO_64K}, {0x43901002, 0x00, "AMD SB7x0/SB8x0/SB9x0", AHCI_Q_ATI_PMP_BUG | AHCI_Q_1MSI}, {0x43911002, 0x00, "AMD SB7x0/SB8x0/SB9x0", AHCI_Q_ATI_PMP_BUG | AHCI_Q_1MSI}, {0x43921002, 0x00, "AMD SB7x0/SB8x0/SB9x0", AHCI_Q_ATI_PMP_BUG | AHCI_Q_1MSI}, {0x43931002, 0x00, "AMD SB7x0/SB8x0/SB9x0", AHCI_Q_ATI_PMP_BUG | AHCI_Q_1MSI}, {0x43941002, 0x00, "AMD SB7x0/SB8x0/SB9x0", AHCI_Q_ATI_PMP_BUG | AHCI_Q_1MSI}, /* Not sure SB8x0/SB9x0 needs this quirk. 
Be conservative though */ {0x43951002, 0x00, "AMD SB8x0/SB9x0", AHCI_Q_ATI_PMP_BUG}, {0x43b61022, 0x00, "AMD X399", 0}, {0x43b51022, 0x00, "AMD 300 Series", 0}, /* X370 */ {0x43b71022, 0x00, "AMD 300 Series", 0}, /* B350 */ {0x78001022, 0x00, "AMD Hudson-2", 0}, {0x78011022, 0x00, "AMD Hudson-2", 0}, {0x78021022, 0x00, "AMD Hudson-2", 0}, {0x78031022, 0x00, "AMD Hudson-2", 0}, {0x78041022, 0x00, "AMD Hudson-2", 0}, {0x79001022, 0x00, "AMD KERNCZ", 0}, {0x79011022, 0x00, "AMD KERNCZ", 0}, {0x79021022, 0x00, "AMD KERNCZ", 0}, {0x79031022, 0x00, "AMD KERNCZ", 0}, {0x79041022, 0x00, "AMD KERNCZ", 0}, {0x06011b21, 0x00, "ASMedia ASM1060", AHCI_Q_NOCCS|AHCI_Q_NOAUX}, {0x06021b21, 0x00, "ASMedia ASM1060", AHCI_Q_NOCCS|AHCI_Q_NOAUX}, {0x06111b21, 0x00, "ASMedia ASM1061", AHCI_Q_NOCCS|AHCI_Q_NOAUX}, {0x06121b21, 0x00, "ASMedia ASM1062", AHCI_Q_NOCCS|AHCI_Q_NOAUX}, {0x06201b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS|AHCI_Q_NOAUX}, {0x06211b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS|AHCI_Q_NOAUX}, {0x06221b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS|AHCI_Q_NOAUX}, {0x06241b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS|AHCI_Q_NOAUX}, {0x06251b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS|AHCI_Q_NOAUX}, {0x10621b21, 0x00, "ASMedia ASM116x", 0}, {0x10641b21, 0x00, "ASMedia ASM116x", 0}, {0x11641b21, 0x00, "ASMedia ASM116x", 0}, {0x11651b21, 0x00, "ASMedia ASM116x", 0}, {0x11661b21, 0x00, "ASMedia ASM116x", 0}, {0x79011d94, 0x00, "Hygon KERNCZ", 0}, {0x26528086, 0x00, "Intel ICH6", AHCI_Q_NOFORCE}, {0x26538086, 0x00, "Intel ICH6M", AHCI_Q_NOFORCE}, {0x26818086, 0x00, "Intel ESB2", 0}, {0x26828086, 0x00, "Intel ESB2", 0}, {0x26838086, 0x00, "Intel ESB2", 0}, {0x27c18086, 0x00, "Intel ICH7", 0}, {0x27c38086, 0x00, "Intel ICH7", 0}, {0x27c58086, 0x00, "Intel ICH7M", 0}, {0x27c68086, 0x00, "Intel ICH7M", 0}, {0x28218086, 0x00, "Intel ICH8", 0}, {0x28228086, 0x00, "Intel ICH8+ (RAID)", 0}, {0x28248086, 0x00, "Intel ICH8", 0}, {0x28298086, 0x00, "Intel ICH8M", 0}, {0x282a8086, 0x00, "Intel ICH8M+ (RAID)", 0}, {0x29228086, 0x00, "Intel ICH9", 0}, {0x29238086, 0x00, "Intel ICH9", 0}, {0x29248086, 0x00, "Intel ICH9", 0}, {0x29258086, 0x00, "Intel ICH9", 0}, {0x29278086, 0x00, "Intel ICH9", 0}, {0x29298086, 0x00, "Intel ICH9M", 0}, {0x292a8086, 0x00, "Intel ICH9M", 0}, {0x292b8086, 0x00, "Intel ICH9M", 0}, {0x292c8086, 0x00, "Intel ICH9M", 0}, {0x292f8086, 0x00, "Intel ICH9M", 0}, {0x294d8086, 0x00, "Intel ICH9", 0}, {0x294e8086, 0x00, "Intel ICH9M", 0}, {0x3a058086, 0x00, "Intel ICH10 (RAID)", 0}, {0x3a228086, 0x00, "Intel ICH10", 0}, {0x3a258086, 0x00, "Intel ICH10 (RAID)", 0}, {0x3b228086, 0x00, "Intel Ibex Peak", 0}, {0x3b238086, 0x00, "Intel Ibex Peak", 0}, {0x3b258086, 0x00, "Intel Ibex Peak (RAID)", 0}, {0x3b298086, 0x00, "Intel Ibex Peak-M", 0}, {0x3b2c8086, 0x00, "Intel Ibex Peak-M (RAID)", 0}, {0x3b2f8086, 0x00, "Intel Ibex Peak-M", 0}, {0x19b08086, 0x00, "Intel Denverton", 0}, {0x19b18086, 0x00, "Intel Denverton", 0}, {0x19b28086, 0x00, "Intel Denverton", 0}, {0x19b38086, 0x00, "Intel Denverton", 0}, {0x19b48086, 0x00, "Intel Denverton", 0}, {0x19b58086, 0x00, "Intel Denverton", 0}, {0x19b68086, 0x00, "Intel Denverton", 0}, {0x19b78086, 0x00, "Intel Denverton", 0}, {0x19be8086, 0x00, "Intel Denverton", 0}, {0x19bf8086, 0x00, "Intel Denverton", 0}, {0x19c08086, 0x00, "Intel Denverton", 0}, {0x19c18086, 0x00, "Intel Denverton", 0}, {0x19c28086, 0x00, "Intel Denverton", 0}, {0x19c38086, 0x00, "Intel Denverton", 0}, {0x19c48086, 0x00, "Intel Denverton", 0}, {0x19c58086, 0x00, "Intel Denverton", 0}, {0x19c68086, 0x00, 
"Intel Denverton", 0}, {0x19c78086, 0x00, "Intel Denverton", 0}, {0x19ce8086, 0x00, "Intel Denverton", 0}, {0x19cf8086, 0x00, "Intel Denverton", 0}, {0x1c028086, 0x00, "Intel Cougar Point", 0}, {0x1c038086, 0x00, "Intel Cougar Point", 0}, {0x1c048086, 0x00, "Intel Cougar Point (RAID)", 0}, {0x1c058086, 0x00, "Intel Cougar Point (RAID)", 0}, {0x1c068086, 0x00, "Intel Cougar Point (RAID)", 0}, {0x1d028086, 0x00, "Intel Patsburg", 0}, {0x1d048086, 0x00, "Intel Patsburg", 0}, {0x1d068086, 0x00, "Intel Patsburg", 0}, {0x28268086, 0x00, "Intel Patsburg+ (RAID)", 0}, {0x1e028086, 0x00, "Intel Panther Point", 0}, {0x1e038086, 0x00, "Intel Panther Point", 0}, {0x1e048086, 0x00, "Intel Panther Point (RAID)", 0}, {0x1e058086, 0x00, "Intel Panther Point (RAID)", 0}, {0x1e068086, 0x00, "Intel Panther Point (RAID)", 0}, {0x1e078086, 0x00, "Intel Panther Point (RAID)", 0}, {0x1e0e8086, 0x00, "Intel Panther Point (RAID)", 0}, {0x1e0f8086, 0x00, "Intel Panther Point (RAID)", 0}, {0x1f228086, 0x00, "Intel Avoton", 0}, {0x1f238086, 0x00, "Intel Avoton", 0}, {0x1f248086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f258086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f268086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f278086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f2e8086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f2f8086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f328086, 0x00, "Intel Avoton", 0}, {0x1f338086, 0x00, "Intel Avoton", 0}, {0x1f348086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f358086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f368086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f378086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f3e8086, 0x00, "Intel Avoton (RAID)", 0}, {0x1f3f8086, 0x00, "Intel Avoton (RAID)", 0}, {0x23a38086, 0x00, "Intel Coleto Creek", 0}, {0x31e38086, 0x00, "Intel Gemini Lake", 0}, {0x5ae38086, 0x00, "Intel Apollo Lake", 0}, {0x8c028086, 0x00, "Intel Lynx Point", 0}, {0x8c038086, 0x00, "Intel Lynx Point", 0}, {0x8c048086, 0x00, "Intel Lynx Point (RAID)", 0}, {0x8c058086, 0x00, "Intel Lynx Point (RAID)", 0}, {0x8c068086, 0x00, "Intel Lynx Point (RAID)", 0}, {0x8c078086, 0x00, "Intel Lynx Point (RAID)", 0}, {0x8c0e8086, 0x00, "Intel Lynx Point (RAID)", 0}, {0x8c0f8086, 0x00, "Intel Lynx Point (RAID)", 0}, {0x8c828086, 0x00, "Intel Wildcat Point", 0}, {0x8c838086, 0x00, "Intel Wildcat Point", 0}, {0x8c848086, 0x00, "Intel Wildcat Point (RAID)", 0}, {0x8c858086, 0x00, "Intel Wildcat Point (RAID)", 0}, {0x8c868086, 0x00, "Intel Wildcat Point (RAID)", 0}, {0x8c878086, 0x00, "Intel Wildcat Point (RAID)", 0}, {0x8c8e8086, 0x00, "Intel Wildcat Point (RAID)", 0}, {0x8c8f8086, 0x00, "Intel Wildcat Point (RAID)", 0}, {0x8d028086, 0x00, "Intel Wellsburg", 0}, {0x8d048086, 0x00, "Intel Wellsburg (RAID)", 0}, {0x8d068086, 0x00, "Intel Wellsburg (RAID)", 0}, {0x8d628086, 0x00, "Intel Wellsburg", 0}, {0x8d648086, 0x00, "Intel Wellsburg (RAID)", 0}, {0x8d668086, 0x00, "Intel Wellsburg (RAID)", 0}, {0x8d6e8086, 0x00, "Intel Wellsburg (RAID)", 0}, {0x28238086, 0x00, "Intel Wellsburg+ (RAID)", 0}, {0x28278086, 0x00, "Intel Wellsburg+ (RAID)", 0}, {0x9c028086, 0x00, "Intel Lynx Point-LP", 0}, {0x9c038086, 0x00, "Intel Lynx Point-LP", 0}, {0x9c048086, 0x00, "Intel Lynx Point-LP (RAID)", 0}, {0x9c058086, 0x00, "Intel Lynx Point-LP (RAID)", 0}, {0x9c068086, 0x00, "Intel Lynx Point-LP (RAID)", 0}, {0x9c078086, 0x00, "Intel Lynx Point-LP (RAID)", 0}, {0x9c0e8086, 0x00, "Intel Lynx Point-LP (RAID)", 0}, {0x9c0f8086, 0x00, "Intel Lynx Point-LP (RAID)", 0}, {0x9c838086, 0x00, "Intel Wildcat Point-LP", 0}, {0x9c858086, 0x00, "Intel Wildcat Point-LP (RAID)", 
0}, {0x9c878086, 0x00, "Intel Wildcat Point-LP (RAID)", 0}, {0x9c8f8086, 0x00, "Intel Wildcat Point-LP (RAID)", 0}, {0x9d038086, 0x00, "Intel Sunrise Point-LP", 0}, {0x9d058086, 0x00, "Intel Sunrise Point-LP (RAID)", 0}, {0x9d078086, 0x00, "Intel Sunrise Point-LP (RAID)", 0}, {0xa1028086, 0x00, "Intel Sunrise Point", 0}, {0xa1038086, 0x00, "Intel Sunrise Point", 0}, {0xa1058086, 0x00, "Intel Sunrise Point (RAID)", 0}, {0xa1068086, 0x00, "Intel Sunrise Point (RAID)", 0}, {0xa1078086, 0x00, "Intel Sunrise Point (RAID)", 0}, {0xa10f8086, 0x00, "Intel Sunrise Point (RAID)", 0}, {0xa1828086, 0x00, "Intel Lewisburg", 0}, {0xa1868086, 0x00, "Intel Lewisburg (RAID)", 0}, {0xa1d28086, 0x00, "Intel Lewisburg", 0}, {0xa1d68086, 0x00, "Intel Lewisburg (RAID)", 0}, {0xa2028086, 0x00, "Intel Lewisburg", 0}, {0xa2068086, 0x00, "Intel Lewisburg (RAID)", 0}, {0xa2528086, 0x00, "Intel Lewisburg", 0}, {0xa2568086, 0x00, "Intel Lewisburg (RAID)", 0}, {0xa2828086, 0x00, "Intel Union Point", 0}, {0xa2868086, 0x00, "Intel Union Point (RAID)", 0}, {0xa28e8086, 0x00, "Intel Union Point (RAID)", 0}, {0xa3528086, 0x00, "Intel Cannon Lake", 0}, {0xa3538086, 0x00, "Intel Cannon Lake", 0}, {0x23238086, 0x00, "Intel DH89xxCC", 0}, {0x2360197b, 0x00, "JMicron JMB360", 0}, {0x2361197b, 0x00, "JMicron JMB361", AHCI_Q_NOFORCE | AHCI_Q_1CH}, {0x2362197b, 0x00, "JMicron JMB362", 0}, {0x2363197b, 0x00, "JMicron JMB363", AHCI_Q_NOFORCE}, {0x2365197b, 0x00, "JMicron JMB365", AHCI_Q_NOFORCE}, {0x2366197b, 0x00, "JMicron JMB366", AHCI_Q_NOFORCE}, {0x2368197b, 0x00, "JMicron JMB368", AHCI_Q_NOFORCE}, {0x2392197b, 0x00, "JMicron JMB388", AHCI_Q_IOMMU_BUSWIDE}, {0x0585197b, 0x00, "JMicron JMB58x", 0}, {0x01221c28, 0x00, "Lite-On Plextor M6E (Marvell 88SS9183)", AHCI_Q_IOMMU_BUSWIDE}, {0x611111ab, 0x00, "Marvell 88SE6111", AHCI_Q_NOFORCE | AHCI_Q_NOPMP | AHCI_Q_1CH | AHCI_Q_EDGEIS}, {0x612111ab, 0x00, "Marvell 88SE6121", AHCI_Q_NOFORCE | AHCI_Q_NOPMP | AHCI_Q_2CH | AHCI_Q_EDGEIS | AHCI_Q_NONCQ | AHCI_Q_NOCOUNT}, {0x614111ab, 0x00, "Marvell 88SE6141", AHCI_Q_NOFORCE | AHCI_Q_NOPMP | AHCI_Q_4CH | AHCI_Q_EDGEIS | AHCI_Q_NONCQ | AHCI_Q_NOCOUNT}, {0x614511ab, 0x00, "Marvell 88SE6145", AHCI_Q_NOFORCE | AHCI_Q_NOPMP | AHCI_Q_4CH | AHCI_Q_EDGEIS | AHCI_Q_NONCQ | AHCI_Q_NOCOUNT}, {0x91201b4b, 0x00, "Marvell 88SE912x", AHCI_Q_EDGEIS | AHCI_Q_IOMMU_BUSWIDE}, {0x91231b4b, 0x11, "Marvell 88SE912x", AHCI_Q_ALTSIG | AHCI_Q_IOMMU_BUSWIDE}, {0x91231b4b, 0x00, "Marvell 88SE912x", AHCI_Q_EDGEIS | AHCI_Q_SATA2 | AHCI_Q_IOMMU_BUSWIDE}, {0x91251b4b, 0x00, "Marvell 88SE9125", 0}, {0x91281b4b, 0x00, "Marvell 88SE9128", AHCI_Q_ALTSIG | AHCI_Q_IOMMU_BUSWIDE}, {0x91301b4b, 0x00, "Marvell 88SE9130", AHCI_Q_ALTSIG | AHCI_Q_IOMMU_BUSWIDE}, {0x91701b4b, 0x00, "Marvell 88SE9170", AHCI_Q_IOMMU_BUSWIDE}, {0x91721b4b, 0x00, "Marvell 88SE9172", AHCI_Q_IOMMU_BUSWIDE}, {0x917a1b4b, 0x00, "Marvell 88SE917A", AHCI_Q_IOMMU_BUSWIDE}, {0x91821b4b, 0x00, "Marvell 88SE9182", AHCI_Q_IOMMU_BUSWIDE}, {0x91831b4b, 0x00, "Marvell 88SS9183", AHCI_Q_IOMMU_BUSWIDE}, {0x91a01b4b, 0x00, "Marvell 88SE91Ax", AHCI_Q_IOMMU_BUSWIDE}, {0x92151b4b, 0x00, "Marvell 88SE9215", 0}, {0x92201b4b, 0x00, "Marvell 88SE9220", AHCI_Q_ALTSIG | AHCI_Q_IOMMU_BUSWIDE}, {0x92301b4b, 0x00, "Marvell 88SE9230", AHCI_Q_ALTSIG | AHCI_Q_IOMMU_BUSWIDE}, {0x92351b4b, 0x00, "Marvell 88SE9235", 0}, {0x06201103, 0x00, "HighPoint RocketRAID 620", 0}, {0x06201b4b, 0x00, "HighPoint RocketRAID 620", 0}, {0x06221103, 0x00, "HighPoint RocketRAID 622", 0}, {0x06221b4b, 0x00, "HighPoint RocketRAID 622", 0}, {0x06401103, 0x00, 
"HighPoint RocketRAID 640", 0}, {0x06401b4b, 0x00, "HighPoint RocketRAID 640", 0}, {0x06441103, 0x00, "HighPoint RocketRAID 644", 0}, {0x06441b4b, 0x00, "HighPoint RocketRAID 644", 0}, {0x06411103, 0x00, "HighPoint RocketRAID 640L", 0}, {0x06421103, 0x00, "HighPoint RocketRAID 642L", AHCI_Q_IOMMU_BUSWIDE}, {0x06451103, 0x00, "HighPoint RocketRAID 644L", AHCI_Q_IOMMU_BUSWIDE}, {0x044c10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x044d10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x044e10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x044f10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x045c10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x045d10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x045e10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x045f10de, 0x00, "NVIDIA MCP65", AHCI_Q_NOAA}, {0x055010de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055110de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055210de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055310de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055410de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055510de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055610de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055710de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055810de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055910de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055A10de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x055B10de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x058410de, 0x00, "NVIDIA MCP67", AHCI_Q_NOAA}, {0x07f010de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f110de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f210de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f310de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f410de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f510de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f610de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f710de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f810de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07f910de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07fa10de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x07fb10de, 0x00, "NVIDIA MCP73", AHCI_Q_NOAA}, {0x0ad010de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad110de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad210de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad310de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad410de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad510de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad610de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad710de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad810de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ad910de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ada10de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0adb10de, 0x00, "NVIDIA MCP77", AHCI_Q_NOAA}, {0x0ab410de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab510de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab610de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab710de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab810de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0ab910de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0aba10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abb10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abc10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abd10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abe10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0abf10de, 0x00, "NVIDIA MCP79", AHCI_Q_NOAA}, {0x0d8410de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8510de, 0x00, "NVIDIA MCP89", AHCI_Q_NOFORCE|AHCI_Q_NOAA}, {0x0d8610de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8710de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8810de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8910de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8a10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8b10de, 0x00, 
"NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8c10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8d10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8e10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x0d8f10de, 0x00, "NVIDIA MCP89", AHCI_Q_NOAA}, {0x3781105a, 0x00, "Promise TX8660", 0}, {0x33491106, 0x00, "VIA VT8251", AHCI_Q_NOPMP|AHCI_Q_NONCQ}, {0x62871106, 0x00, "VIA VT8251", AHCI_Q_NOPMP|AHCI_Q_NONCQ}, {0x11841039, 0x00, "SiS 966", 0}, {0x11851039, 0x00, "SiS 968", 0}, {0x01861039, 0x00, "SiS 968", 0}, {0xa01c177d, 0x00, "ThunderX", AHCI_Q_ABAR0|AHCI_Q_1MSI}, {0x00311c36, 0x00, "Annapurna", AHCI_Q_FORCE_PI|AHCI_Q_RESTORE_CAP|AHCI_Q_NOMSIX}, {0x1600144d, 0x00, "Samsung", AHCI_Q_NOMSI}, {0x00000000, 0x00, NULL, 0} }; static int ahci_pci_ctlr_reset(device_t dev) { return(ahci_ctlr_reset(dev)); } static int ahci_probe(device_t dev) { char buf[64]; int i, valid = 0; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); /* * Ensure it is not a PCI bridge (some vendors use * the same PID and VID in PCI bridge and AHCI cards). */ if (pci_get_class(dev) == PCIC_BRIDGE) return (ENXIO); /* Is this a possible AHCI candidate? */ if (pci_get_class(dev) == PCIC_STORAGE && pci_get_subclass(dev) == PCIS_STORAGE_SATA && pci_get_progif(dev) == PCIP_STORAGE_SATA_AHCI_1_0) valid = 1; else if (pci_get_class(dev) == PCIC_STORAGE && pci_get_subclass(dev) == PCIS_STORAGE_RAID) valid = 2; /* Is this a known AHCI chip? */ for (i = 0; ahci_ids[i].id != 0; i++) { if (ahci_ids[i].id == devid && ahci_ids[i].rev <= revid && (valid || (force_ahci == 1 && !(ahci_ids[i].quirks & AHCI_Q_NOFORCE)))) { /* Do not attach JMicrons with single PCI function. */ if (pci_get_vendor(dev) == 0x197b && (ahci_ids[i].quirks & AHCI_Q_NOFORCE) && (pci_read_config(dev, 0xdf, 1) & 0x40) == 0) return (ENXIO); snprintf(buf, sizeof(buf), "%s AHCI SATA controller", ahci_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } if (valid != 1) return (ENXIO); device_set_desc_copy(dev, "AHCI SATA controller"); return (BUS_PROBE_DEFAULT); } static int ahci_ata_probe(device_t dev) { char buf[64]; int i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); if ((intptr_t)device_get_ivars(dev) >= 0) return (ENXIO); /* Is this a known AHCI chip? 
*/ for (i = 0; ahci_ids[i].id != 0; i++) { if (ahci_ids[i].id == devid && ahci_ids[i].rev <= revid) { snprintf(buf, sizeof(buf), "%s AHCI SATA controller", ahci_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } device_set_desc_copy(dev, "AHCI SATA controller"); return (BUS_PROBE_DEFAULT); } static int ahci_pci_read_msix_bars(device_t dev, uint8_t *table_bar, uint8_t *pba_bar) { int cap_offset = 0, ret; uint32_t val; if ((table_bar == NULL) || (pba_bar == NULL)) return (EINVAL); ret = pci_find_cap(dev, PCIY_MSIX, &cap_offset); if (ret != 0) return (EINVAL); val = pci_read_config(dev, cap_offset + PCIR_MSIX_TABLE, 4); *table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); val = pci_read_config(dev, cap_offset + PCIR_MSIX_PBA, 4); *pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); return (0); } static int ahci_pci_attach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int error, i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); int msi_count, msix_count; uint8_t table_bar = 0, pba_bar = 0; uint32_t caps, pi; msi_count = pci_msi_count(dev); msix_count = pci_msix_count(dev); i = 0; while (ahci_ids[i].id != 0 && (ahci_ids[i].id != devid || ahci_ids[i].rev > revid)) i++; ctlr->quirks = ahci_ids[i].quirks; if (ctlr->quirks & AHCI_Q_IOMMU_BUSWIDE) { /* * The controller issues DMA requests from PCI function 1, * but the device is not multifunction. * Ref: https://bugzilla.kernel.org/show_bug.cgi?id=42679 */ bus_dma_iommu_set_buswide(dev); } /* Limit speed for my onboard JMicron external port. * It is not eSATA really, limit to SATA 1 */ if (pci_get_devid(dev) == 0x2363197b && pci_get_subvendor(dev) == 0x1043 && pci_get_subdevice(dev) == 0x81e4) ctlr->quirks |= AHCI_Q_SATA1_UNIT0; resource_int_value(device_get_name(dev), device_get_unit(dev), "quirks", &ctlr->quirks); ctlr->vendorid = pci_get_vendor(dev); ctlr->deviceid = pci_get_device(dev); ctlr->subvendorid = pci_get_subvendor(dev); ctlr->subdeviceid = pci_get_subdevice(dev); /* Default AHCI Base Address is BAR(5), Cavium uses BAR(0) */ if (ctlr->quirks & AHCI_Q_ABAR0) ctlr->r_rid = PCIR_BAR(0); else ctlr->r_rid = PCIR_BAR(5); if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE))) return ENXIO; /* * Intel RAID hardware can remap NVMe devices inside its BAR. * Try to detect this. Either we have to add the device * here, or the user has to change the mode in the BIOS * from RST to AHCI. */ if (pci_get_vendor(dev) == 0x8086) { uint32_t vscap; vscap = ATA_INL(ctlr->r_mem, AHCI_VSCAP); if (vscap & 1) { uint32_t cap = ATA_INL(ctlr->r_mem, 0x800); /* Intel's REMAP CAP */ int i; ctlr->remap_offset = 0x4000; ctlr->remap_size = 0x4000; /* * Check each of the devices that might be remapped to * make sure they are an nvme device. At the present, * nvme are the only known devices remapped. 
*/ for (i = 0; i < 3; i++) { if (cap & (1 << i) && (ATA_INL(ctlr->r_mem, 0x880 + i * 0x80) == ((PCIC_STORAGE << 16) | (PCIS_STORAGE_NVM << 8) | PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0))) { ctlr->remapped_devices++; } } /* If we have any remapped device, disable MSI */ if (ctlr->remapped_devices > 0) { device_printf(dev, "Detected %d nvme remapped devices\n", ctlr->remapped_devices); ctlr->quirks |= (AHCI_Q_NOMSIX | AHCI_Q_NOMSI); } } } if (ctlr->quirks & AHCI_Q_NOMSIX) msix_count = 0; /* Read MSI-x BAR IDs if supported */ if (msix_count > 0) { error = ahci_pci_read_msix_bars(dev, &table_bar, &pba_bar); if (error == 0) { ctlr->r_msix_tab_rid = table_bar; ctlr->r_msix_pba_rid = pba_bar; } else { /* Failed to read BARs, disable MSI-x */ msix_count = 0; } } /* Allocate resources for MSI-x table and PBA */ if (msix_count > 0) { /* * Allocate new MSI-x table only if not * allocated before. */ ctlr->r_msix_table = NULL; if (ctlr->r_msix_tab_rid != ctlr->r_rid) { /* Separate BAR for MSI-x */ ctlr->r_msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_msix_tab_rid, RF_ACTIVE); if (ctlr->r_msix_table == NULL) { ahci_free_mem(dev); return (ENXIO); } } /* * Allocate new PBA table only if not * allocated before. */ ctlr->r_msix_pba = NULL; if ((ctlr->r_msix_pba_rid != ctlr->r_msix_tab_rid) && (ctlr->r_msix_pba_rid != ctlr->r_rid)) { /* Separate BAR for PBA */ ctlr->r_msix_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_msix_pba_rid, RF_ACTIVE); if (ctlr->r_msix_pba == NULL) { ahci_free_mem(dev); return (ENXIO); } } } pci_enable_busmaster(dev); /* Reset controller */ if ((error = ahci_pci_ctlr_reset(dev)) != 0) { ahci_free_mem(dev); return (error); } /* Setup interrupts. */ /* Setup MSI register parameters */ /* Process hints. */ caps = ATA_INL(ctlr->r_mem, AHCI_CAP); pi = ATA_INL(ctlr->r_mem, AHCI_PI); if (ctlr->quirks & AHCI_Q_NOMSI) ctlr->msi = 0; else if ((ctlr->quirks & AHCI_Q_1MSI) || ((caps & (AHCI_CAP_NPMASK | AHCI_CAP_CCCS)) == 0 && pi == 1)) ctlr->msi = 1; else ctlr->msi = 2; resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &ctlr->msi); ctlr->numirqs = 1; if (msi_count == 0 && msix_count == 0) ctlr->msi = 0; if (ctlr->msi < 0) ctlr->msi = 0; else if (ctlr->msi == 1) { msi_count = min(1, msi_count); msix_count = min(1, msix_count); } else if (ctlr->msi > 1) ctlr->msi = 2; /* Allocate MSI/MSI-x if needed/present. */ if (ctlr->msi > 0) { error = ENXIO; /* Try to allocate MSI-x first */ if (msix_count > 0) { error = pci_alloc_msix(dev, &msix_count); if (error == 0) ctlr->numirqs = msix_count; } /* * Try to allocate MSI if msi_count is greater than 0 * and if MSI-x allocation failed. 
*/ if ((error != 0) && (msi_count > 0)) { error = pci_alloc_msi(dev, &msi_count); if (error == 0) ctlr->numirqs = msi_count; } /* Both MSI and MSI-x allocations failed */ if (error != 0) { ctlr->msi = 0; device_printf(dev, "Failed to allocate MSI/MSI-x, " "falling back to INTx\n"); } } error = ahci_attach(dev); if (error != 0) { if (ctlr->msi > 0) pci_release_msi(dev); ahci_free_mem(dev); } return error; } static int ahci_pci_detach(device_t dev) { ahci_detach(dev); pci_release_msi(dev); return (0); } static int ahci_pci_suspend(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); bus_generic_suspend(dev); /* Disable interrupts, so the state change(s) don't trigger */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, ATA_INL(ctlr->r_mem, AHCI_GHC) & (~AHCI_GHC_IE)); return 0; } static int ahci_pci_resume(device_t dev) { int res; if ((res = ahci_pci_ctlr_reset(dev)) != 0) return (res); ahci_ctlr_setup(dev); return (bus_generic_resume(dev)); } static device_method_t ahci_methods[] = { DEVMETHOD(device_probe, ahci_probe), DEVMETHOD(device_attach, ahci_pci_attach), DEVMETHOD(device_detach, ahci_pci_detach), DEVMETHOD(device_suspend, ahci_pci_suspend), DEVMETHOD(device_resume, ahci_pci_resume), DEVMETHOD(bus_print_child, ahci_print_child), DEVMETHOD(bus_alloc_resource, ahci_alloc_resource), DEVMETHOD(bus_release_resource, ahci_release_resource), DEVMETHOD(bus_setup_intr, ahci_setup_intr), DEVMETHOD(bus_teardown_intr,ahci_teardown_intr), - DEVMETHOD(bus_child_location_str, ahci_child_location_str), + DEVMETHOD(bus_child_location, ahci_child_location), DEVMETHOD(bus_get_dma_tag, ahci_get_dma_tag), DEVMETHOD_END }; static driver_t ahci_driver = { "ahci", ahci_methods, sizeof(struct ahci_controller) }; DRIVER_MODULE(ahci, pci, ahci_driver, ahci_devclass, NULL, NULL); /* Also matches class / subclass / progid XXX need to add when we have masking support */ MODULE_PNP_INFO("W32:vendor/device", pci, ahci, ahci_ids, nitems(ahci_ids) - 1); static device_method_t ahci_ata_methods[] = { DEVMETHOD(device_probe, ahci_ata_probe), DEVMETHOD(device_attach, ahci_pci_attach), DEVMETHOD(device_detach, ahci_pci_detach), DEVMETHOD(device_suspend, ahci_pci_suspend), DEVMETHOD(device_resume, ahci_pci_resume), DEVMETHOD(bus_print_child, ahci_print_child), DEVMETHOD(bus_alloc_resource, ahci_alloc_resource), DEVMETHOD(bus_release_resource, ahci_release_resource), DEVMETHOD(bus_setup_intr, ahci_setup_intr), DEVMETHOD(bus_teardown_intr,ahci_teardown_intr), - DEVMETHOD(bus_child_location_str, ahci_child_location_str), + DEVMETHOD(bus_child_location, ahci_child_location), DEVMETHOD_END }; static driver_t ahci_ata_driver = { "ahci", ahci_ata_methods, sizeof(struct ahci_controller) }; DRIVER_MODULE(ahci, atapci, ahci_ata_driver, ahci_devclass, NULL, NULL); diff --git a/sys/dev/ata/ata-pci.c b/sys/dev/ata/ata-pci.c index 578f719f40e4..d8f97158709b 100644 --- a/sys/dev/ata/ata-pci.c +++ b/sys/dev/ata/ata-pci.c @@ -1,931 +1,931 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1998 - 2008 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE(M_ATAPCI, "ata_pci", "ATA driver PCI"); /* misc defines */ #define IOMASK 0xfffffffc /* * generic PCI ATA device probe */ int ata_pci_probe(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); char buffer[64]; /* is this a storage class device ? */ if (pci_get_class(dev) != PCIC_STORAGE) return (ENXIO); /* is this an IDE/ATA type device ? */ if (pci_get_subclass(dev) != PCIS_STORAGE_IDE) return (ENXIO); sprintf(buffer, "%s ATA controller", ata_pcivendor2str(dev)); device_set_desc_copy(dev, buffer); ctlr->chipinit = ata_generic_chipinit; /* we are a low priority handler */ return (BUS_PROBE_GENERIC); } int ata_pci_attach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); device_t child; u_int32_t cmd; int unit; /* do chipset specific setups only needed once */ ctlr->legacy = ata_legacy(dev); if (ctlr->legacy || pci_read_config(dev, PCIR_BAR(2), 4) & IOMASK) ctlr->channels = 2; else ctlr->channels = 1; ctlr->ichannels = -1; ctlr->ch_attach = ata_pci_ch_attach; ctlr->ch_detach = ata_pci_ch_detach; ctlr->dev = dev; /* if needed try to enable busmastering */ pci_enable_busmaster(dev); cmd = pci_read_config(dev, PCIR_COMMAND, 2); /* if busmastering mode "stuck" use it */ if ((cmd & PCIM_CMD_BUSMASTEREN) == PCIM_CMD_BUSMASTEREN) { ctlr->r_type1 = SYS_RES_IOPORT; ctlr->r_rid1 = ATA_BMADDR_RID; ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1, &ctlr->r_rid1, RF_ACTIVE); } if (ctlr->chipinit(dev)) { if (ctlr->r_res1) bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1, ctlr->r_res1); return ENXIO; } /* attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { if ((ctlr->ichannels & (1 << unit)) == 0) continue; child = device_add_child(dev, "ata", ((unit == 0 || unit == 1) && ctlr->legacy) ? 
unit : devclass_find_free_unit(ata_devclass, 2)); if (child == NULL) device_printf(dev, "failed to add ata child device\n"); else device_set_ivars(child, (void *)(intptr_t)unit); } bus_generic_attach(dev); return 0; } int ata_pci_detach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); /* detach & delete all children */ device_delete_children(dev); if (ctlr->r_irq) { bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->r_irq_rid, ctlr->r_irq); if (ctlr->r_irq_rid != ATA_IRQ_RID) pci_release_msi(dev); } if (ctlr->chipdeinit != NULL) ctlr->chipdeinit(dev); if (ctlr->r_res2) { bus_release_resource(dev, ctlr->r_type2, ctlr->r_rid2, ctlr->r_res2); } if (ctlr->r_res1) { bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1, ctlr->r_res1); } return 0; } int ata_pci_suspend(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); int error = 0; bus_generic_suspend(dev); if (ctlr->suspend) error = ctlr->suspend(dev); return error; } int ata_pci_resume(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); int error = 0; if (ctlr->resume) error = ctlr->resume(dev); bus_generic_resume(dev); return error; } int ata_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { return (BUS_READ_IVAR(device_get_parent(dev), dev, which, result)); } int ata_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { return (BUS_WRITE_IVAR(device_get_parent(dev), dev, which, value)); } uint32_t ata_pci_read_config(device_t dev, device_t child, int reg, int width) { return (pci_read_config(dev, reg, width)); } void ata_pci_write_config(device_t dev, device_t child, int reg, uint32_t val, int width) { pci_write_config(dev, reg, val, width); } struct resource * ata_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ata_pci_controller *controller = device_get_softc(dev); struct resource *res = NULL; if (device_get_devclass(child) == ata_devclass) { int unit = ((struct ata_channel *)device_get_softc(child))->unit; int myrid; if (type == SYS_RES_IOPORT) { switch (*rid) { case ATA_IOADDR_RID: if (controller->legacy) { start = (unit ? ATA_SECONDARY : ATA_PRIMARY); count = ATA_IOSIZE; end = start + count - 1; } myrid = PCIR_BAR(0) + (unit << 3); res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, &myrid, start, end, count, flags); break; case ATA_CTLADDR_RID: if (controller->legacy) { start = (unit ? ATA_SECONDARY : ATA_PRIMARY) + ATA_CTLOFFSET; count = ATA_CTLIOSIZE; end = start + count - 1; } myrid = PCIR_BAR(1) + (unit << 3); res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, &myrid, start, end, count, flags); break; } } if (type == SYS_RES_IRQ && *rid == ATA_IRQ_RID) { if (controller->legacy) { int irq = (unit == 0 ? 
14 : 15); res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, SYS_RES_IRQ, rid, irq, irq, 1, flags); } else res = controller->r_irq; } } else { if (type == SYS_RES_IRQ) { if (*rid != ATA_IRQ_RID) return (NULL); res = controller->r_irq; } else { res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, type, rid, start, end, count, flags); } } return (res); } int ata_pci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { if (device_get_devclass(child) == ata_devclass) { struct ata_pci_controller *controller = device_get_softc(dev); int unit = ((struct ata_channel *)device_get_softc(child))->unit; if (type == SYS_RES_IOPORT) { switch (rid) { case ATA_IOADDR_RID: return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, PCIR_BAR(0) + (unit << 3), r); case ATA_CTLADDR_RID: return BUS_RELEASE_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, PCIR_BAR(1) + (unit << 3), r); default: return ENOENT; } } if (type == SYS_RES_IRQ) { if (rid != ATA_IRQ_RID) return ENOENT; if (controller->legacy) { return BUS_RELEASE_RESOURCE(device_get_parent(dev), child, SYS_RES_IRQ, rid, r); } else return 0; } } else { if (type == SYS_RES_IRQ) { if (rid != ATA_IRQ_RID) return (ENOENT); return (0); } else { return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r)); } } return (EINVAL); } int ata_pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct ata_pci_controller *controller = device_get_softc(dev); if (controller->legacy) { return BUS_SETUP_INTR(device_get_parent(dev), child, irq, flags, filter, function, argument, cookiep); } else { struct ata_pci_controller *controller = device_get_softc(dev); int unit; if (filter != NULL) { printf("ata-pci.c: we cannot use a filter here\n"); return (EINVAL); } if (device_get_devclass(child) == ata_devclass) unit = ((struct ata_channel *)device_get_softc(child))->unit; else unit = ATA_PCI_MAX_CH - 1; controller->interrupt[unit].function = function; controller->interrupt[unit].argument = argument; *cookiep = controller; return 0; } } int ata_pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct ata_pci_controller *controller = device_get_softc(dev); if (controller->legacy) { return BUS_TEARDOWN_INTR(device_get_parent(dev), child, irq, cookie); } else { struct ata_pci_controller *controller = device_get_softc(dev); int unit; if (device_get_devclass(child) == ata_devclass) unit = ((struct ata_channel *)device_get_softc(child))->unit; else unit = ATA_PCI_MAX_CH - 1; controller->interrupt[unit].function = NULL; controller->interrupt[unit].argument = NULL; return 0; } } int ata_generic_setmode(device_t dev, int target, int mode) { return (min(mode, ATA_UDMA2)); } int ata_generic_chipinit(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); if (ata_setup_interrupt(dev, ata_generic_intr)) return ENXIO; ctlr->setmode = ata_generic_setmode; return 0; } int ata_pci_ch_attach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); struct resource *io = NULL, *ctlio = NULL; int i, rid; rid = ATA_IOADDR_RID; if (!(io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE))) return ENXIO; rid = ATA_CTLADDR_RID; if (!(ctlio = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,RF_ACTIVE))){ bus_release_resource(dev, SYS_RES_IOPORT, ATA_IOADDR_RID, io); return 
ENXIO; } ata_pci_dmainit(dev); for (i = ATA_DATA; i <= ATA_COMMAND; i ++) { ch->r_io[i].res = io; ch->r_io[i].offset = i; } ch->r_io[ATA_CONTROL].res = ctlio; ch->r_io[ATA_CONTROL].offset = ctlr->legacy ? 0 : 2; ch->r_io[ATA_IDX_ADDR].res = io; ata_default_registers(dev); if (ctlr->r_res1) { for (i = ATA_BMCMD_PORT; i <= ATA_BMDTP_PORT; i++) { ch->r_io[i].res = ctlr->r_res1; ch->r_io[i].offset = (i - ATA_BMCMD_PORT) + (ch->unit*ATA_BMIOSIZE); } } ata_pci_hw(dev); return 0; } int ata_pci_ch_detach(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_pci_dmafini(dev); bus_release_resource(dev, SYS_RES_IOPORT, ATA_CTLADDR_RID, ch->r_io[ATA_CONTROL].res); bus_release_resource(dev, SYS_RES_IOPORT, ATA_IOADDR_RID, ch->r_io[ATA_IDX_ADDR].res); return (0); } int ata_pci_status(device_t dev) { struct ata_pci_controller *controller = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); if ((dumping || !controller->legacy) && ((ch->flags & ATA_ALWAYS_DMASTAT) || (ch->dma.flags & ATA_DMA_ACTIVE))) { int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; if ((bmstat & ATA_BMSTAT_INTERRUPT) == 0) return 0; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, bmstat & ~ATA_BMSTAT_ERROR); DELAY(1); } if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) { DELAY(100); if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) return 0; } return 1; } void ata_pci_hw(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_generic_hw(dev); ch->hw.status = ata_pci_status; } static int ata_pci_dmastart(struct ata_request *request) { struct ata_channel *ch = device_get_softc(request->parent); ATA_DEBUG_RQ(request, "dmastart"); ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, (ATA_IDX_INB(ch, ATA_BMSTAT_PORT) | (ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR))); ATA_IDX_OUTL(ch, ATA_BMDTP_PORT, request->dma->sg_bus); ch->dma.flags |= ATA_DMA_ACTIVE; ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, (ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_WRITE_READ) | ((request->flags & ATA_R_READ) ? 
ATA_BMCMD_WRITE_READ : 0)| ATA_BMCMD_START_STOP); return 0; } static int ata_pci_dmastop(struct ata_request *request) { struct ata_channel *ch = device_get_softc(request->parent); int error; ATA_DEBUG_RQ(request, "dmastop"); ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP); ch->dma.flags &= ~ATA_DMA_ACTIVE; error = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR); return error; } static void ata_pci_dmareset(device_t dev) { struct ata_channel *ch = device_get_softc(dev); struct ata_request *request; ATA_IDX_OUTB(ch, ATA_BMCMD_PORT, ATA_IDX_INB(ch, ATA_BMCMD_PORT) & ~ATA_BMCMD_START_STOP); ch->dma.flags &= ~ATA_DMA_ACTIVE; ATA_IDX_OUTB(ch, ATA_BMSTAT_PORT, ATA_BMSTAT_INTERRUPT | ATA_BMSTAT_ERROR); if ((request = ch->running)) { device_printf(dev, "DMA reset calling unload\n"); ch->dma.unload(request); } } void ata_pci_dmainit(device_t dev) { struct ata_channel *ch = device_get_softc(dev); ata_dmainit(dev); ch->dma.start = ata_pci_dmastart; ch->dma.stop = ata_pci_dmastop; ch->dma.reset = ata_pci_dmareset; } void ata_pci_dmafini(device_t dev) { ata_dmafini(dev); } int ata_pci_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at channel %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } int -ata_pci_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) +ata_pci_child_location(device_t dev, device_t child, struct sbuf *sb) { - snprintf(buf, buflen, "channel=%d", + sbuf_printf(sb, "channel=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static bus_dma_tag_t ata_pci_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } static device_method_t ata_pci_methods[] = { /* device interface */ DEVMETHOD(device_probe, ata_pci_probe), DEVMETHOD(device_attach, ata_pci_attach), DEVMETHOD(device_detach, ata_pci_detach), DEVMETHOD(device_suspend, ata_pci_suspend), DEVMETHOD(device_resume, ata_pci_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* bus methods */ DEVMETHOD(bus_read_ivar, ata_pci_read_ivar), DEVMETHOD(bus_write_ivar, ata_pci_write_ivar), DEVMETHOD(bus_alloc_resource, ata_pci_alloc_resource), DEVMETHOD(bus_release_resource, ata_pci_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, ata_pci_setup_intr), DEVMETHOD(bus_teardown_intr, ata_pci_teardown_intr), DEVMETHOD(pci_read_config, ata_pci_read_config), DEVMETHOD(pci_write_config, ata_pci_write_config), DEVMETHOD(bus_print_child, ata_pci_print_child), - DEVMETHOD(bus_child_location_str, ata_pci_child_location_str), + DEVMETHOD(bus_child_location, ata_pci_child_location), DEVMETHOD(bus_get_dma_tag, ata_pci_get_dma_tag), DEVMETHOD_END }; devclass_t ata_pci_devclass; static driver_t ata_pci_driver = { "atapci", ata_pci_methods, sizeof(struct ata_pci_controller), }; DRIVER_MODULE(atapci, pci, ata_pci_driver, ata_pci_devclass, NULL, NULL); MODULE_VERSION(atapci, 1); MODULE_DEPEND(atapci, ata, 1, 1, 1); static int ata_pcichannel_probe(device_t dev) { if ((intptr_t)device_get_ivars(dev) < 0) return (ENXIO); device_set_desc(dev, "ATA channel"); return ata_probe(dev); } static int ata_pcichannel_attach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = 
device_get_softc(dev); int error; if (ch->attached) return (0); ch->attached = 1; ch->dev = dev; ch->unit = (intptr_t)device_get_ivars(dev); resource_int_value(device_get_name(dev), device_get_unit(dev), "pm_level", &ch->pm_level); if ((error = ctlr->ch_attach(dev))) return error; return ata_attach(dev); } static int ata_pcichannel_detach(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int error; if (!ch->attached) return (0); ch->attached = 0; if ((error = ata_detach(dev))) return error; if (ctlr->ch_detach) return (ctlr->ch_detach(dev)); return (0); } static int ata_pcichannel_suspend(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int error; if (!ch->attached) return (0); if ((error = ata_suspend(dev))) return (error); if (ctlr->ch_suspend != NULL && (error = ctlr->ch_suspend(dev))) return (error); return (0); } static int ata_pcichannel_resume(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); int error; if (!ch->attached) return (0); if (ctlr->ch_resume != NULL && (error = ctlr->ch_resume(dev))) return (error); return ata_resume(dev); } static void ata_pcichannel_reset(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); /* if DMA engine present reset it */ if (ch->dma.reset) ch->dma.reset(dev); /* reset the controller HW */ if (ctlr->reset) ctlr->reset(dev); else ata_generic_reset(dev); } static int ata_pcichannel_setmode(device_t dev, int target, int mode) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); if (ctlr->setmode) return (ctlr->setmode(dev, target, mode)); else return (ata_generic_setmode(dev, target, mode)); } static int ata_pcichannel_getrev(device_t dev, int target) { struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ata_channel *ch = device_get_softc(dev); if (ch->flags & ATA_SATA) { if (ctlr->getrev) return (ctlr->getrev(dev, target)); else return (0xff); } else return (0); } static device_method_t ata_pcichannel_methods[] = { /* device interface */ DEVMETHOD(device_probe, ata_pcichannel_probe), DEVMETHOD(device_attach, ata_pcichannel_attach), DEVMETHOD(device_detach, ata_pcichannel_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, ata_pcichannel_suspend), DEVMETHOD(device_resume, ata_pcichannel_resume), /* ATA methods */ DEVMETHOD(ata_setmode, ata_pcichannel_setmode), DEVMETHOD(ata_getrev, ata_pcichannel_getrev), DEVMETHOD(ata_reset, ata_pcichannel_reset), DEVMETHOD_END }; driver_t ata_pcichannel_driver = { "ata", ata_pcichannel_methods, sizeof(struct ata_channel), }; DRIVER_MODULE(ata, atapci, ata_pcichannel_driver, ata_devclass, NULL, NULL); /* * misc support functions */ int ata_legacy(device_t dev) { return (((pci_read_config(dev, PCIR_SUBCLASS, 1) == PCIS_STORAGE_IDE) && (pci_read_config(dev, PCIR_PROGIF, 1)&PCIP_STORAGE_IDE_MASTERDEV)&& ((pci_read_config(dev, PCIR_PROGIF, 1) & (PCIP_STORAGE_IDE_MODEPRIM | PCIP_STORAGE_IDE_MODESEC)) != (PCIP_STORAGE_IDE_MODEPRIM | PCIP_STORAGE_IDE_MODESEC))) || (!pci_read_config(dev, PCIR_BAR(0), 4) && !pci_read_config(dev, PCIR_BAR(1), 4) && !pci_read_config(dev, PCIR_BAR(2), 4) && !pci_read_config(dev, PCIR_BAR(3), 4) && !pci_read_config(dev, PCIR_BAR(5), 4))); } void
ata_generic_intr(void *data) { struct ata_pci_controller *ctlr = data; struct ata_channel *ch; int unit; for (unit = 0; unit < ATA_PCI_MAX_CH; unit++) { if ((ch = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(ch); } } int ata_setup_interrupt(device_t dev, void *intr_func) { struct ata_pci_controller *ctlr = device_get_softc(dev); int i, msi = 0; if (!ctlr->legacy) { if (resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &i) == 0 && i != 0) msi = 1; if (msi && pci_msi_count(dev) > 0 && pci_alloc_msi(dev, &msi) == 0) { ctlr->r_irq_rid = 0x1; } else { msi = 0; ctlr->r_irq_rid = ATA_IRQ_RID; } if (!(ctlr->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); if (msi) pci_release_msi(dev); return ENXIO; } if ((bus_setup_intr(dev, ctlr->r_irq, ATA_INTR_FLAGS, NULL, intr_func, ctlr, &ctlr->handle))) { device_printf(dev, "unable to setup interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, ctlr->r_irq_rid, ctlr->r_irq); if (msi) pci_release_msi(dev); return ENXIO; } } return 0; } void ata_set_desc(device_t dev) { struct ata_pci_controller *ctlr = device_get_softc(dev); char buffer[128]; sprintf(buffer, "%s %s %s controller", ata_pcivendor2str(dev), ctlr->chip->text, ata_mode2str(ctlr->chip->max_dma)); device_set_desc_copy(dev, buffer); } const struct ata_chip_id * ata_match_chip(device_t dev, const struct ata_chip_id *index) { uint32_t devid; uint8_t revid; devid = pci_get_devid(dev); revid = pci_get_revid(dev); while (index->chipid != 0) { if (devid == index->chipid && revid >= index->chiprev) return (index); index++; } return (NULL); } const struct ata_chip_id * ata_find_chip(device_t dev, const struct ata_chip_id *index, int slot) { const struct ata_chip_id *idx; device_t *children; int nchildren, i; uint8_t s; if (device_get_children(device_get_parent(dev), &children, &nchildren)) return (NULL); for (i = 0; i < nchildren; i++) { s = pci_get_slot(children[i]); if ((slot >= 0 && s == slot) || (slot < 0 && s <= -slot)) { idx = ata_match_chip(children[i], index); if (idx != NULL) { free(children, M_TEMP); return (idx); } } } free(children, M_TEMP); return (NULL); } const char * ata_pcivendor2str(device_t dev) { switch (pci_get_vendor(dev)) { case ATA_ACARD_ID: return "Acard"; case ATA_ACER_LABS_ID: return "AcerLabs"; case ATA_AMD_ID: return "AMD"; case ATA_ADAPTEC_ID: return "Adaptec"; case ATA_ATI_ID: return "ATI"; case ATA_CYRIX_ID: return "Cyrix"; case ATA_CYPRESS_ID: return "Cypress"; case ATA_HIGHPOINT_ID: return "HighPoint"; case ATA_INTEL_ID: return "Intel"; case ATA_ITE_ID: return "ITE"; case ATA_JMICRON_ID: return "JMicron"; case ATA_MARVELL_ID: return "Marvell"; case ATA_MARVELL2_ID: return "Marvell"; case ATA_NATIONAL_ID: return "National"; case ATA_NETCELL_ID: return "Netcell"; case ATA_NVIDIA_ID: return "nVidia"; case ATA_PROMISE_ID: return "Promise"; case ATA_SERVERWORKS_ID: return "ServerWorks"; case ATA_SILICON_IMAGE_ID: return "SiI"; case ATA_SIS_ID: return "SiS"; case ATA_VIA_ID: return "VIA"; case ATA_CENATEK_ID: return "Cenatek"; case ATA_MICRON_ID: return "Micron"; default: return "Generic"; } } int ata_mode2idx(int mode) { if ((mode & ATA_DMA_MASK) == ATA_UDMA0) return (mode & ATA_MODE_MASK) + 8; if ((mode & ATA_DMA_MASK) == ATA_WDMA0) return (mode & ATA_MODE_MASK) + 5; return (mode & ATA_MODE_MASK) - ATA_PIO0; } diff --git a/sys/dev/ata/ata-pci.h b/sys/dev/ata/ata-pci.h index 2fc8b278d589..ab0e661de29f 100644 --- a/sys/dev/ata/ata-pci.h +++ 
b/sys/dev/ata/ata-pci.h @@ -1,600 +1,599 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 - 2008 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* structure holding chipset config info */ struct ata_chip_id { u_int32_t chipid; u_int8_t chiprev; int cfg1; int cfg2; u_int8_t max_dma; const char *text; }; #define ATA_PCI_MAX_CH 8 /* structure describing a PCI ATA controller */ struct ata_pci_controller { device_t dev; int r_type1; int r_rid1; struct resource *r_res1; int r_type2; int r_rid2; struct resource *r_res2; int r_irq_rid; struct resource *r_irq; void *handle; const struct ata_chip_id *chip; int legacy; int channels; int ichannels; int (*chipinit)(device_t); int (*chipdeinit)(device_t); int (*suspend)(device_t); int (*resume)(device_t); int (*ch_attach)(device_t); int (*ch_detach)(device_t); int (*ch_suspend)(device_t); int (*ch_resume)(device_t); void (*reset)(device_t); int (*setmode)(device_t, int, int); int (*getrev)(device_t, int); struct { void (*function)(void *); void *argument; } interrupt[ATA_PCI_MAX_CH]; void *chipset_data; }; /* defines for known chipset PCI id's */ #define ATA_ACARD_ID 0x1191 #define ATA_ATP850 0x00021191 #define ATA_ATP850A 0x00041191 #define ATA_ATP850R 0x00051191 #define ATA_ATP860A 0x00061191 #define ATA_ATP860R 0x00071191 #define ATA_ATP865A 0x00081191 #define ATA_ATP865R 0x00091191 #define ATA_ACER_LABS_ID 0x10b9 #define ATA_ALI_1533 0x153310b9 #define ATA_ALI_5228 0x522810b9 #define ATA_ALI_5229 0x522910b9 #define ATA_ALI_5281 0x528110b9 #define ATA_ALI_5287 0x528710b9 #define ATA_ALI_5288 0x528810b9 #define ATA_ALI_5289 0x528910b9 #define ATA_AMD_ID 0x1022 #define ATA_AMD755 0x74011022 #define ATA_AMD756 0x74091022 #define ATA_AMD766 0x74111022 #define ATA_AMD768 0x74411022 #define ATA_AMD8111 0x74691022 #define ATA_AMD5536 0x209a1022 #define ATA_AMD_HUDSON2_S1 0x78001022 #define ATA_AMD_HUDSON2_S2 0x78011022 #define ATA_AMD_HUDSON2_S3 0x78021022 #define ATA_AMD_HUDSON2_S4 0x78031022 #define ATA_AMD_HUDSON2_S5 0x78041022 #define ATA_AMD_HUDSON2 0x780c1022 #define ATA_ADAPTEC_ID 0x9005 #define ATA_ADAPTEC_1420 0x02419005 #define ATA_ADAPTEC_1430 0x02439005 #define ATA_ATI_ID 0x1002 #define ATA_ATI_IXP200 0x43491002 #define ATA_ATI_IXP300 0x43691002 
#define ATA_ATI_IXP300_S1 0x436e1002 #define ATA_ATI_IXP400 0x43761002 #define ATA_ATI_IXP400_S1 0x43791002 #define ATA_ATI_IXP400_S2 0x437a1002 #define ATA_ATI_IXP600 0x438c1002 #define ATA_ATI_IXP600_S1 0x43801002 #define ATA_ATI_IXP700 0x439c1002 #define ATA_ATI_IXP700_S1 0x43901002 #define ATA_ATI_IXP700_S2 0x43911002 #define ATA_ATI_IXP700_S3 0x43921002 #define ATA_ATI_IXP700_S4 0x43931002 #define ATA_ATI_IXP800_S1 0x43941002 #define ATA_ATI_IXP800_S2 0x43951002 #define ATA_CENATEK_ID 0x16ca #define ATA_CENATEK_ROCKET 0x000116ca #define ATA_CYRIX_ID 0x1078 #define ATA_CYRIX_5530 0x01021078 #define ATA_CYPRESS_ID 0x1080 #define ATA_CYPRESS_82C693 0xc6931080 #define ATA_DEC_21150 0x00221011 #define ATA_DEC_21150_1 0x00231011 #define ATA_HIGHPOINT_ID 0x1103 #define ATA_HPT366 0x00041103 #define ATA_HPT372 0x00051103 #define ATA_HPT302 0x00061103 #define ATA_HPT371 0x00071103 #define ATA_HPT374 0x00081103 #define ATA_INTEL_ID 0x8086 #define ATA_I960RM 0x09628086 #define ATA_I82371FB 0x12308086 #define ATA_I82371SB 0x70108086 #define ATA_I82371AB 0x71118086 #define ATA_I82443MX 0x71998086 #define ATA_I82451NX 0x84ca8086 #define ATA_I82372FB 0x76018086 #define ATA_I82801AB 0x24218086 #define ATA_I82801AA 0x24118086 #define ATA_I82801BA 0x244a8086 #define ATA_I82801BA_1 0x244b8086 #define ATA_I82801CA 0x248a8086 #define ATA_I82801CA_1 0x248b8086 #define ATA_I82801DB 0x24cb8086 #define ATA_I82801DB_1 0x24ca8086 #define ATA_I82801EB 0x24db8086 #define ATA_I82801EB_S1 0x24d18086 #define ATA_I82801EB_R1 0x24df8086 #define ATA_I6300ESB 0x25a28086 #define ATA_I6300ESB_S1 0x25a38086 #define ATA_I6300ESB_R1 0x25b08086 #define ATA_I63XXESB2 0x269e8086 #define ATA_I63XXESB2_S1 0x26808086 #define ATA_I82801FB 0x266f8086 #define ATA_I82801FB_S1 0x26518086 #define ATA_I82801FB_R1 0x26528086 #define ATA_I82801FBM 0x26538086 #define ATA_I82801GB 0x27df8086 #define ATA_I82801GB_S1 0x27c08086 #define ATA_I82801GBM_S1 0x27c48086 #define ATA_I82801HB_S1 0x28208086 #define ATA_I82801HB_S2 0x28258086 #define ATA_I82801HBM 0x28508086 #define ATA_I82801HBM_S1 0x28288086 #define ATA_I82801IB_S1 0x29208086 #define ATA_I82801IB_S3 0x29218086 #define ATA_I82801IB_R1 0x29258086 #define ATA_I82801IB_S2 0x29268086 #define ATA_I82801IBM_S1 0x29288086 #define ATA_I82801IBM_S2 0x292d8086 #define ATA_I82801JIB_S1 0x3a208086 #define ATA_I82801JIB_S2 0x3a268086 #define ATA_I82801JD_S1 0x3a008086 #define ATA_I82801JD_S2 0x3a068086 #define ATA_I82801JI_S1 0x3a208086 #define ATA_I82801JI_S2 0x3a268086 #define ATA_IBP_S1 0x3b208086 #define ATA_IBP_S2 0x3b218086 #define ATA_IBP_S3 0x3b268086 #define ATA_IBP_S4 0x3b288086 #define ATA_IBP_S5 0x3b2d8086 #define ATA_IBP_S6 0x3b2e8086 #define ATA_CPT_S1 0x1c008086 #define ATA_CPT_S2 0x1c018086 #define ATA_CPT_S3 0x1c088086 #define ATA_CPT_S4 0x1c098086 #define ATA_PBG_S1 0x1d008086 #define ATA_PBG_S2 0x1d088086 #define ATA_PPT_S1 0x1e008086 #define ATA_PPT_S2 0x1e018086 #define ATA_PPT_S3 0x1e088086 #define ATA_PPT_S4 0x1e098086 #define ATA_AVOTON_S1 0x1f208086 #define ATA_AVOTON_S2 0x1f218086 #define ATA_AVOTON_S3 0x1f308086 #define ATA_AVOTON_S4 0x1f318086 #define ATA_LPT_S1 0x8c008086 #define ATA_LPT_S2 0x8c018086 #define ATA_LPT_S3 0x8c088086 #define ATA_LPT_S4 0x8c098086 #define ATA_WCPT_S1 0x8c808086 #define ATA_WCPT_S2 0x8c818086 #define ATA_WCPT_S3 0x8c888086 #define ATA_WCPT_S4 0x8c898086 #define ATA_WELLS_S1 0x8d008086 #define ATA_WELLS_S2 0x8d088086 #define ATA_WELLS_S3 0x8d608086 #define ATA_WELLS_S4 0x8d688086 #define ATA_LPTLP_S1 0x9c008086 #define ATA_LPTLP_S2 0x9c018086 
#define ATA_LPTLP_S3 0x9c088086 #define ATA_LPTLP_S4 0x9c098086 #define ATA_I31244 0x32008086 #define ATA_ISCH 0x811a8086 #define ATA_COLETOCRK_S1 0x23a18086 #define ATA_COLETOCRK_S2 0x23a68086 #define ATA_ITE_ID 0x1283 #define ATA_IT8211F 0x82111283 #define ATA_IT8212F 0x82121283 #define ATA_IT8213F 0x82131283 #define ATA_JMICRON_ID 0x197b #define ATA_JMB360 0x2360197b #define ATA_JMB361 0x2361197b #define ATA_JMB362 0x2362197b #define ATA_JMB363 0x2363197b #define ATA_JMB365 0x2365197b #define ATA_JMB366 0x2366197b #define ATA_JMB368 0x2368197b #define ATA_JMB368_2 0x0368197b #define ATA_MARVELL_ID 0x11ab #define ATA_M88SE6101 0x610111ab #define ATA_M88SE6102 0x610211ab #define ATA_M88SE6111 0x611111ab #define ATA_M88SE6121 0x612111ab #define ATA_M88SE6141 0x614111ab #define ATA_M88SE6145 0x614511ab #define ATA_MARVELL2_ID 0x1b4b #define ATA_MICRON_ID 0x1042 #define ATA_MICRON_RZ1000 0x10001042 #define ATA_MICRON_RZ1001 0x10011042 #define ATA_NATIONAL_ID 0x100b #define ATA_SC1100 0x0502100b #define ATA_NETCELL_ID 0x169c #define ATA_NETCELL_SR 0x0044169c #define ATA_NVIDIA_ID 0x10de #define ATA_NFORCE1 0x01bc10de #define ATA_NFORCE2 0x006510de #define ATA_NFORCE2_PRO 0x008510de #define ATA_NFORCE2_PRO_S1 0x008e10de #define ATA_NFORCE3 0x00d510de #define ATA_NFORCE3_PRO 0x00e510de #define ATA_NFORCE3_PRO_S1 0x00e310de #define ATA_NFORCE3_PRO_S2 0x00ee10de #define ATA_NFORCE_MCP04 0x003510de #define ATA_NFORCE_MCP04_S1 0x003610de #define ATA_NFORCE_MCP04_S2 0x003e10de #define ATA_NFORCE_CK804 0x005310de #define ATA_NFORCE_CK804_S1 0x005410de #define ATA_NFORCE_CK804_S2 0x005510de #define ATA_NFORCE_MCP51 0x026510de #define ATA_NFORCE_MCP51_S1 0x026610de #define ATA_NFORCE_MCP51_S2 0x026710de #define ATA_NFORCE_MCP55 0x036e10de #define ATA_NFORCE_MCP55_S1 0x037e10de #define ATA_NFORCE_MCP55_S2 0x037f10de #define ATA_NFORCE_MCP61 0x03ec10de #define ATA_NFORCE_MCP61_S1 0x03e710de #define ATA_NFORCE_MCP61_S2 0x03f610de #define ATA_NFORCE_MCP61_S3 0x03f710de #define ATA_NFORCE_MCP65 0x044810de #define ATA_NFORCE_MCP65_A0 0x044c10de #define ATA_NFORCE_MCP65_A1 0x044d10de #define ATA_NFORCE_MCP65_A2 0x044e10de #define ATA_NFORCE_MCP65_A3 0x044f10de #define ATA_NFORCE_MCP65_A4 0x045c10de #define ATA_NFORCE_MCP65_A5 0x045d10de #define ATA_NFORCE_MCP65_A6 0x045e10de #define ATA_NFORCE_MCP65_A7 0x045f10de #define ATA_NFORCE_MCP67 0x056010de #define ATA_NFORCE_MCP67_A0 0x055010de #define ATA_NFORCE_MCP67_A1 0x055110de #define ATA_NFORCE_MCP67_A2 0x055210de #define ATA_NFORCE_MCP67_A3 0x055310de #define ATA_NFORCE_MCP67_A4 0x055410de #define ATA_NFORCE_MCP67_A5 0x055510de #define ATA_NFORCE_MCP67_A6 0x055610de #define ATA_NFORCE_MCP67_A7 0x055710de #define ATA_NFORCE_MCP67_A8 0x055810de #define ATA_NFORCE_MCP67_A9 0x055910de #define ATA_NFORCE_MCP67_AA 0x055A10de #define ATA_NFORCE_MCP67_AB 0x055B10de #define ATA_NFORCE_MCP67_AC 0x058410de #define ATA_NFORCE_MCP73 0x056c10de #define ATA_NFORCE_MCP73_A0 0x07f010de #define ATA_NFORCE_MCP73_A1 0x07f110de #define ATA_NFORCE_MCP73_A2 0x07f210de #define ATA_NFORCE_MCP73_A3 0x07f310de #define ATA_NFORCE_MCP73_A4 0x07f410de #define ATA_NFORCE_MCP73_A5 0x07f510de #define ATA_NFORCE_MCP73_A6 0x07f610de #define ATA_NFORCE_MCP73_A7 0x07f710de #define ATA_NFORCE_MCP73_A8 0x07f810de #define ATA_NFORCE_MCP73_A9 0x07f910de #define ATA_NFORCE_MCP73_AA 0x07fa10de #define ATA_NFORCE_MCP73_AB 0x07fb10de #define ATA_NFORCE_MCP77 0x075910de #define ATA_NFORCE_MCP77_A0 0x0ad010de #define ATA_NFORCE_MCP77_A1 0x0ad110de #define ATA_NFORCE_MCP77_A2 0x0ad210de #define 
ATA_NFORCE_MCP77_A3 0x0ad310de #define ATA_NFORCE_MCP77_A4 0x0ad410de #define ATA_NFORCE_MCP77_A5 0x0ad510de #define ATA_NFORCE_MCP77_A6 0x0ad610de #define ATA_NFORCE_MCP77_A7 0x0ad710de #define ATA_NFORCE_MCP77_A8 0x0ad810de #define ATA_NFORCE_MCP77_A9 0x0ad910de #define ATA_NFORCE_MCP77_AA 0x0ada10de #define ATA_NFORCE_MCP77_AB 0x0adb10de #define ATA_NFORCE_MCP79_A0 0x0ab410de #define ATA_NFORCE_MCP79_A1 0x0ab510de #define ATA_NFORCE_MCP79_A2 0x0ab610de #define ATA_NFORCE_MCP79_A3 0x0ab710de #define ATA_NFORCE_MCP79_A4 0x0ab810de #define ATA_NFORCE_MCP79_A5 0x0ab910de #define ATA_NFORCE_MCP79_A6 0x0aba10de #define ATA_NFORCE_MCP79_A7 0x0abb10de #define ATA_NFORCE_MCP79_A8 0x0abc10de #define ATA_NFORCE_MCP79_A9 0x0abd10de #define ATA_NFORCE_MCP79_AA 0x0abe10de #define ATA_NFORCE_MCP79_AB 0x0abf10de #define ATA_NFORCE_MCP89_A0 0x0d8410de #define ATA_NFORCE_MCP89_A1 0x0d8510de #define ATA_NFORCE_MCP89_A2 0x0d8610de #define ATA_NFORCE_MCP89_A3 0x0d8710de #define ATA_NFORCE_MCP89_A4 0x0d8810de #define ATA_NFORCE_MCP89_A5 0x0d8910de #define ATA_NFORCE_MCP89_A6 0x0d8a10de #define ATA_NFORCE_MCP89_A7 0x0d8b10de #define ATA_NFORCE_MCP89_A8 0x0d8c10de #define ATA_NFORCE_MCP89_A9 0x0d8d10de #define ATA_NFORCE_MCP89_AA 0x0d8e10de #define ATA_NFORCE_MCP89_AB 0x0d8f10de #define ATA_PROMISE_ID 0x105a #define ATA_PDC20246 0x4d33105a #define ATA_PDC20262 0x4d38105a #define ATA_PDC20263 0x0d38105a #define ATA_PDC20265 0x0d30105a #define ATA_PDC20267 0x4d30105a #define ATA_PDC20268 0x4d68105a #define ATA_PDC20269 0x4d69105a #define ATA_PDC20270 0x6268105a #define ATA_PDC20271 0x6269105a #define ATA_PDC20275 0x1275105a #define ATA_PDC20276 0x5275105a #define ATA_PDC20277 0x7275105a #define ATA_PDC20318 0x3318105a #define ATA_PDC20319 0x3319105a #define ATA_PDC20371 0x3371105a #define ATA_PDC20375 0x3375105a #define ATA_PDC20376 0x3376105a #define ATA_PDC20377 0x3377105a #define ATA_PDC20378 0x3373105a #define ATA_PDC20379 0x3372105a #define ATA_PDC20571 0x3571105a #define ATA_PDC20575 0x3d75105a #define ATA_PDC20579 0x3574105a #define ATA_PDC20771 0x3570105a #define ATA_PDC40518 0x3d18105a #define ATA_PDC40519 0x3519105a #define ATA_PDC40718 0x3d17105a #define ATA_PDC40719 0x3515105a #define ATA_PDC40775 0x3d73105a #define ATA_PDC40779 0x3577105a #define ATA_PDC20617 0x6617105a #define ATA_PDC20618 0x6626105a #define ATA_PDC20619 0x6629105a #define ATA_PDC20620 0x6620105a #define ATA_PDC20621 0x6621105a #define ATA_PDC20622 0x6622105a #define ATA_PDC20624 0x6624105a #define ATA_PDC81518 0x8002105a #define ATA_SERVERWORKS_ID 0x1166 #define ATA_ROSB4_ISA 0x02001166 #define ATA_ROSB4 0x02111166 #define ATA_CSB5 0x02121166 #define ATA_CSB6 0x02131166 #define ATA_CSB6_1 0x02171166 #define ATA_HT1000 0x02141166 #define ATA_HT1000_S1 0x024b1166 #define ATA_HT1000_S2 0x024a1166 #define ATA_K2 0x02401166 #define ATA_FRODO4 0x02411166 #define ATA_FRODO8 0x02421166 #define ATA_SILICON_IMAGE_ID 0x1095 #define ATA_SII3114 0x31141095 #define ATA_SII3512 0x35121095 #define ATA_SII3112 0x31121095 #define ATA_SII3112_1 0x02401095 #define ATA_SII0680 0x06801095 #define ATA_CMD646 0x06461095 #define ATA_CMD648 0x06481095 #define ATA_CMD649 0x06491095 #define ATA_SIS_ID 0x1039 #define ATA_SISSOUTH 0x00081039 #define ATA_SIS5511 0x55111039 #define ATA_SIS5513 0x55131039 #define ATA_SIS5517 0x55171039 #define ATA_SIS5518 0x55181039 #define ATA_SIS5571 0x55711039 #define ATA_SIS5591 0x55911039 #define ATA_SIS5596 0x55961039 #define ATA_SIS5597 0x55971039 #define ATA_SIS5598 0x55981039 #define ATA_SIS5600 0x56001039 #define 
ATA_SIS530 0x05301039 #define ATA_SIS540 0x05401039 #define ATA_SIS550 0x05501039 #define ATA_SIS620 0x06201039 #define ATA_SIS630 0x06301039 #define ATA_SIS635 0x06351039 #define ATA_SIS633 0x06331039 #define ATA_SIS640 0x06401039 #define ATA_SIS645 0x06451039 #define ATA_SIS646 0x06461039 #define ATA_SIS648 0x06481039 #define ATA_SIS650 0x06501039 #define ATA_SIS651 0x06511039 #define ATA_SIS652 0x06521039 #define ATA_SIS655 0x06551039 #define ATA_SIS658 0x06581039 #define ATA_SIS661 0x06611039 #define ATA_SIS730 0x07301039 #define ATA_SIS733 0x07331039 #define ATA_SIS735 0x07351039 #define ATA_SIS740 0x07401039 #define ATA_SIS745 0x07451039 #define ATA_SIS746 0x07461039 #define ATA_SIS748 0x07481039 #define ATA_SIS750 0x07501039 #define ATA_SIS751 0x07511039 #define ATA_SIS752 0x07521039 #define ATA_SIS755 0x07551039 #define ATA_SIS961 0x09611039 #define ATA_SIS962 0x09621039 #define ATA_SIS963 0x09631039 #define ATA_SIS964 0x09641039 #define ATA_SIS965 0x09651039 #define ATA_SIS180 0x01801039 #define ATA_SIS181 0x01811039 #define ATA_SIS182 0x01821039 #define ATA_VIA_ID 0x1106 #define ATA_VIA82C571 0x05711106 #define ATA_VIA82C586 0x05861106 #define ATA_VIA82C596 0x05961106 #define ATA_VIA82C686 0x06861106 #define ATA_VIA8231 0x82311106 #define ATA_VIA8233 0x30741106 #define ATA_VIA8233A 0x31471106 #define ATA_VIA8233C 0x31091106 #define ATA_VIA8235 0x31771106 #define ATA_VIA8237 0x32271106 #define ATA_VIA8237A 0x05911106 #define ATA_VIA8237S 0x53371106 #define ATA_VIA8237_5372 0x53721106 #define ATA_VIA8237_7372 0x73721106 #define ATA_VIA8251 0x33491106 #define ATA_VIA8361 0x31121106 #define ATA_VIA8363 0x03051106 #define ATA_VIA8371 0x03911106 #define ATA_VIA8662 0x31021106 #define ATA_VIA6410 0x31641106 #define ATA_VIA6420 0x31491106 #define ATA_VIA6421 0x32491106 #define ATA_VIACX700IDE 0x05811106 #define ATA_VIACX700 0x83241106 #define ATA_VIASATAIDE 0x53241106 #define ATA_VIAVX800 0x83531106 #define ATA_VIASATAIDE2 0xc4091106 #define ATA_VIAVX855 0x84091106 #define ATA_VIASATAIDE3 0x90011106 #define ATA_VIAVX900 0x84101106 /* global prototypes ata-pci.c */ int ata_pci_probe(device_t dev); int ata_pci_attach(device_t dev); int ata_pci_detach(device_t dev); int ata_pci_suspend(device_t dev); int ata_pci_resume(device_t dev); int ata_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); int ata_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value); uint32_t ata_pci_read_config(device_t dev, device_t child, int reg, int width); void ata_pci_write_config(device_t dev, device_t child, int reg, uint32_t val, int width); int ata_pci_print_child(device_t dev, device_t child); -int ata_pci_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen); +int ata_pci_child_location(device_t dev, device_t child, struct sbuf *sb); struct resource * ata_pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int ata_pci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); int ata_pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep); int ata_pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie); int ata_pci_ch_attach(device_t dev); int ata_pci_ch_detach(device_t dev); int ata_pci_status(device_t dev); void ata_pci_hw(device_t dev); void ata_pci_dmainit(device_t dev); void ata_pci_dmafini(device_t 
dev); const char *ata_pcivendor2str(device_t dev); int ata_legacy(device_t); void ata_generic_intr(void *data); int ata_generic_chipinit(device_t dev); int ata_generic_setmode(device_t dev, int target, int mode); int ata_setup_interrupt(device_t dev, void *intr_func); void ata_set_desc(device_t dev); const struct ata_chip_id *ata_match_chip(device_t dev, const struct ata_chip_id *index); const struct ata_chip_id *ata_find_chip(device_t dev, const struct ata_chip_id *index, int slot); int ata_mode2idx(int mode); /* global prototypes from chipsets/ata-*.c */ int ata_sii_chipinit(device_t); /* externs */ extern devclass_t ata_pci_devclass; MALLOC_DECLARE(M_ATAPCI); /* macro for easy definition of all driver module stuff */ #define ATA_DECLARE_DRIVER(dname) \ static device_method_t __CONCAT(dname,_methods)[] = { \ DEVMETHOD(device_probe, __CONCAT(dname,_probe)), \ DEVMETHOD(device_attach, ata_pci_attach), \ DEVMETHOD(device_detach, ata_pci_detach), \ DEVMETHOD(device_suspend, ata_pci_suspend), \ DEVMETHOD(device_resume, ata_pci_resume), \ DEVMETHOD(device_shutdown, bus_generic_shutdown), \ DEVMETHOD(bus_read_ivar, ata_pci_read_ivar), \ DEVMETHOD(bus_write_ivar, ata_pci_write_ivar), \ DEVMETHOD(bus_alloc_resource, ata_pci_alloc_resource), \ DEVMETHOD(bus_release_resource, ata_pci_release_resource), \ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), \ DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), \ DEVMETHOD(bus_setup_intr, ata_pci_setup_intr), \ DEVMETHOD(bus_teardown_intr, ata_pci_teardown_intr), \ DEVMETHOD(pci_read_config, ata_pci_read_config), \ DEVMETHOD(pci_write_config, ata_pci_write_config), \ DEVMETHOD(bus_print_child, ata_pci_print_child), \ - DEVMETHOD(bus_child_location_str, ata_pci_child_location_str), \ + DEVMETHOD(bus_child_location, ata_pci_child_location), \ DEVMETHOD_END \ }; \ static driver_t __CONCAT(dname,_driver) = { \ "atapci", \ __CONCAT(dname,_methods), \ sizeof(struct ata_pci_controller) \ }; \ DRIVER_MODULE(dname, pci, __CONCAT(dname,_driver), ata_pci_devclass, NULL, NULL); \ MODULE_VERSION(dname, 1); \ MODULE_DEPEND(dname, ata, 1, 1, 1); \ MODULE_DEPEND(dname, atapci, 1, 1, 1); diff --git a/sys/dev/ata/chipsets/ata-fsl.c b/sys/dev/ata/chipsets/ata-fsl.c index f7545e211358..32fe2a04bd19 100644 --- a/sys/dev/ata/chipsets/ata-fsl.c +++ b/sys/dev/ata/chipsets/ata-fsl.c @@ -1,242 +1,242 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * This software was developed by Oleksandr Rybalko under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* local prototypes */ static int imx_ata_ch_attach(device_t dev); static int imx_ata_setmode(device_t dev, int target, int mode); static int imx_ata_probe(device_t dev) { struct ata_pci_controller *ctrl; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "fsl,imx51-ata") && !ofw_bus_is_compatible(dev, "fsl,imx53-ata")) return (ENXIO); ctrl = device_get_softc(dev); device_set_desc(dev, "Freescale Integrated PATA Controller"); return (BUS_PROBE_LOW_PRIORITY); } static void imx_ata_intr(void *data) { struct ata_pci_controller *ctrl = data; bus_write_2(ctrl->r_res1, 0x28, bus_read_2(ctrl->r_res1, 0x28)); ctrl->interrupt[0].function(ctrl->interrupt[0].argument); } static int imx_ata_attach(device_t dev) { struct ata_pci_controller *ctrl; device_t child; int unit; ctrl = device_get_softc(dev); /* do chipset specific setups only needed once */ ctrl->legacy = ata_legacy(dev); ctrl->channels = 1; ctrl->ichannels = -1; ctrl->ch_attach = ata_pci_ch_attach; ctrl->ch_detach = ata_pci_ch_detach; ctrl->dev = dev; ctrl->r_type1 = SYS_RES_MEMORY; ctrl->r_rid1 = 0; ctrl->r_res1 = bus_alloc_resource_any(dev, ctrl->r_type1, &ctrl->r_rid1, RF_ACTIVE); if (ata_setup_interrupt(dev, imx_ata_intr)) { device_printf(dev, "failed to setup interrupt\n"); return ENXIO; } ctrl->channels = 1; ctrl->ch_attach = imx_ata_ch_attach; ctrl->setmode = imx_ata_setmode; /* attach all channels on this controller */ unit = 0; child = device_add_child(dev, "ata", ((unit == 0) && ctrl->legacy) ? 
unit : devclass_find_free_unit(ata_devclass, 2)); if (child == NULL) device_printf(dev, "failed to add ata child device\n"); else device_set_ivars(child, (void *)(intptr_t)unit); bus_generic_attach(dev); return 0; } static int imx_ata_ch_attach(device_t dev) { struct ata_pci_controller *ctrl; struct ata_channel *ch; int i; ctrl = device_get_softc(device_get_parent(dev)); ch = device_get_softc(dev); for (i = ATA_DATA; i < ATA_MAX_RES; i++) ch->r_io[i].res = ctrl->r_res1; bus_write_2(ctrl->r_res1, 0x24, 0x80); DELAY(100); bus_write_2(ctrl->r_res1, 0x24, 0xc0); DELAY(100); /* Write TIME_OFF/ON/1/2W */ bus_write_1(ctrl->r_res1, 0x00, 3); bus_write_1(ctrl->r_res1, 0x01, 3); bus_write_1(ctrl->r_res1, 0x02, (25 + 15) / 15); bus_write_1(ctrl->r_res1, 0x03, (70 + 15) / 15); /* Write TIME_2R/AX/RDX/4 */ bus_write_1(ctrl->r_res1, 0x04, (70 + 15) / 15); bus_write_1(ctrl->r_res1, 0x05, (50 + 15) / 15 + 2); bus_write_1(ctrl->r_res1, 0x06, 1); bus_write_1(ctrl->r_res1, 0x07, (10 + 15) / 15); /* Write TIME_9 ; the rest of timing registers is irrelevant for PIO */ bus_write_1(ctrl->r_res1, 0x08, (10 + 15) / 15); bus_write_2(ctrl->r_res1, 0x24, 0xc1); DELAY(30000); /* setup ATA registers */ ch->r_io[ATA_DATA ].offset = 0xa0; ch->r_io[ATA_FEATURE].offset = 0xa4; ch->r_io[ATA_ERROR ].offset = 0xa4; ch->r_io[ATA_COUNT ].offset = 0xa8; ch->r_io[ATA_SECTOR ].offset = 0xac; ch->r_io[ATA_CYL_LSB].offset = 0xb0; ch->r_io[ATA_CYL_MSB].offset = 0xb4; ch->r_io[ATA_DRIVE ].offset = 0xb8; ch->r_io[ATA_COMMAND].offset = 0xbc; ch->r_io[ATA_STATUS ].offset = 0xbc; ch->r_io[ATA_ALTSTAT].offset = 0xd8; ch->r_io[ATA_CONTROL].offset = 0xd8; ata_pci_hw(dev); ch->flags |= ATA_NO_SLAVE; ch->flags |= ATA_USE_16BIT; ch->flags |= ATA_CHECKS_CABLE; ch->flags |= ATA_KNOWN_PRESENCE; /* Clear pending interrupts. */ bus_write_2(ctrl->r_res1, 0x28, 0xf8); /* Enable all, but Idle interrupts. 
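* (Register semantics inferred from this driver rather than an i.MX
* datasheet: offset 0x28 appears to be a write-one-to-clear interrupt
* status register, which is why imx_ata_intr above acknowledges by
* writing back the value it just read, while 0x2c is the matching
* interrupt enable mask.)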
*/ bus_write_2(ctrl->r_res1, 0x2c, 0x88); return 0; } static int imx_ata_setmode(device_t dev, int target, int mode) { return (min(mode, ATA_PIO4)); } static device_method_t imx_ata_methods[] = { DEVMETHOD(device_probe, imx_ata_probe), DEVMETHOD(device_attach, imx_ata_attach), DEVMETHOD(device_detach, ata_pci_detach), DEVMETHOD(device_suspend, ata_pci_suspend), DEVMETHOD(device_resume, ata_pci_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(bus_read_ivar, ata_pci_read_ivar), DEVMETHOD(bus_write_ivar, ata_pci_write_ivar), DEVMETHOD(bus_alloc_resource, ata_pci_alloc_resource), DEVMETHOD(bus_release_resource, ata_pci_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, ata_pci_setup_intr), DEVMETHOD(bus_teardown_intr, ata_pci_teardown_intr), DEVMETHOD(pci_read_config, ata_pci_read_config), DEVMETHOD(pci_write_config, ata_pci_write_config), DEVMETHOD(bus_print_child, ata_pci_print_child), - DEVMETHOD(bus_child_location_str, ata_pci_child_location_str), + DEVMETHOD(bus_child_location, ata_pci_child_location), DEVMETHOD_END }; static driver_t imx_ata_driver = { "atapci", imx_ata_methods, sizeof(struct ata_pci_controller) }; DRIVER_MODULE(imx_ata, simplebus, imx_ata_driver, ata_pci_devclass, NULL, NULL); MODULE_VERSION(imx_ata, 1); MODULE_DEPEND(imx_ata, ata, 1, 1, 1); MODULE_DEPEND(imx_ata, atapci, 1, 1, 1); diff --git a/sys/dev/bhnd/bhnd.c b/sys/dev/bhnd/bhnd.c index 1ec4cb1ce48d..654f42864233 100644 --- a/sys/dev/bhnd/bhnd.c +++ b/sys/dev/bhnd/bhnd.c @@ -1,1209 +1,1194 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Broadcom Home Networking Division (HND) Bus Driver. 
* * The Broadcom HND family of devices consists of both SoCs and host-connected * networking chipsets containing a common family of Broadcom IP cores, * including integrated MIPS and/or ARM cores. * * HND devices expose a nearly identical interface whether accessible over a * native SoC interconnect, or when connected via a host interface such as * PCIe. As a result, the majority of hardware support code should be re-usable * across host drivers for HND networking chipsets, as well as FreeBSD support * for Broadcom MIPS/ARM HND SoCs. * * Earlier HND models used the siba(4) on-chip interconnect, while later models * use bcma(4); the programming model is almost entirely independent * of the actual underlying interconnect. */ #include #include #include #include +#include #include #include #include #include #include #include "bhnd_chipc_if.h" #include "bhnd_nvram_if.h" #include "bhnd.h" #include "bhndreg.h" #include "bhndvar.h" #include "bhnd_private.h" MALLOC_DEFINE(M_BHND, "bhnd", "bhnd bus data structures"); /** * bhnd_generic_probe_nomatch() reporting configuration. */ static const struct bhnd_nomatch { uint16_t vendor; /**< core designer */ uint16_t device; /**< core id */ bool if_verbose; /**< print when bootverbose is set. */ } bhnd_nomatch_table[] = { { BHND_MFGID_ARM, BHND_COREID_OOB_ROUTER, true }, { BHND_MFGID_ARM, BHND_COREID_EROM, true }, { BHND_MFGID_ARM, BHND_COREID_PL301, true }, { BHND_MFGID_ARM, BHND_COREID_APB_BRIDGE, true }, { BHND_MFGID_ARM, BHND_COREID_AXI_UNMAPPED, false }, { BHND_MFGID_INVALID, BHND_COREID_INVALID, false } }; static int bhnd_delete_children(struct bhnd_softc *sc); /** * Default bhnd(4) bus driver implementation of DEVICE_ATTACH(). * * This implementation calls device_probe_and_attach() for each of the device's * children, in bhnd probe order. */ int bhnd_generic_attach(device_t dev) { struct bhnd_softc *sc; int error; if (device_is_attached(dev)) return (EBUSY); sc = device_get_softc(dev); sc->dev = dev; /* Probe and attach all children */ if ((error = bhnd_bus_probe_children(dev))) { bhnd_delete_children(sc); return (error); } return (0); } /** * Detach and delete all children, in reverse of their attach order. */ static int bhnd_delete_children(struct bhnd_softc *sc) { device_t *devs; int ndevs; int error; /* Fetch children in detach order */ error = bhnd_bus_get_children(sc->dev, &devs, &ndevs, BHND_DEVICE_ORDER_DETACH); if (error) return (error); /* Perform detach */ for (int i = 0; i < ndevs; i++) { device_t child = devs[i]; /* Terminate on first error */ if ((error = device_delete_child(sc->dev, child))) goto cleanup; } cleanup: bhnd_bus_free_children(devs); return (error); } /** * Default bhnd(4) bus driver implementation of DEVICE_DETACH(). * * This implementation calls device_detach() for each of the device's * children, in reverse bhnd probe order, terminating if any call to * device_detach() fails. */ int bhnd_generic_detach(device_t dev) { struct bhnd_softc *sc; int error; if (!device_is_attached(dev)) return (EBUSY); sc = device_get_softc(dev); if ((error = bhnd_delete_children(sc))) return (error); return (0); } /** * Default bhnd(4) bus driver implementation of DEVICE_SHUTDOWN(). * * This implementation calls device_shutdown() for each of the device's * children, in reverse bhnd probe order, terminating if any call to * device_shutdown() fails.
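* * Note that the child array obtained from bhnd_bus_get_children() is
* released again via bhnd_bus_free_children() on both the success and
* the error path.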
*/ int bhnd_generic_shutdown(device_t dev) { device_t *devs; int ndevs; int error; if (!device_is_attached(dev)) return (EBUSY); /* Fetch children in detach order */ error = bhnd_bus_get_children(dev, &devs, &ndevs, BHND_DEVICE_ORDER_DETACH); if (error) return (error); /* Perform shutdown */ for (int i = 0; i < ndevs; i++) { device_t child = devs[i]; /* Terminate on first error */ if ((error = device_shutdown(child))) goto cleanup; } cleanup: bhnd_bus_free_children(devs); return (error); } /** * Default bhnd(4) bus driver implementation of DEVICE_RESUME(). * * This implementation calls BUS_RESUME_CHILD() for each of the device's * children in bhnd probe order, terminating if any call to BUS_RESUME_CHILD() * fails. */ int bhnd_generic_resume(device_t dev) { device_t *devs; int ndevs; int error; if (!device_is_attached(dev)) return (EBUSY); /* Fetch children in attach order */ error = bhnd_bus_get_children(dev, &devs, &ndevs, BHND_DEVICE_ORDER_ATTACH); if (error) return (error); /* Perform resume */ for (int i = 0; i < ndevs; i++) { device_t child = devs[i]; /* Terminate on first error */ if ((error = BUS_RESUME_CHILD(device_get_parent(child), child))) goto cleanup; } cleanup: bhnd_bus_free_children(devs); return (error); } /** * Default bhnd(4) bus driver implementation of DEVICE_SUSPEND(). * * This implementation calls BUS_SUSPEND_CHILD() for each of the device's * children in reverse bhnd probe order. If any call to BUS_SUSPEND_CHILD() * fails, the suspend operation is terminated and any devices that were * suspended are resumed immediately by calling their BUS_RESUME_CHILD() * methods. */ int bhnd_generic_suspend(device_t dev) { device_t *devs; int ndevs; int error; if (!device_is_attached(dev)) return (EBUSY); /* Fetch children in detach order */ error = bhnd_bus_get_children(dev, &devs, &ndevs, BHND_DEVICE_ORDER_DETACH); if (error) return (error); /* Perform suspend */ for (int i = 0; i < ndevs; i++) { device_t child = devs[i]; error = BUS_SUSPEND_CHILD(device_get_parent(child), child); /* On error, resume suspended devices and then terminate */ if (error) { for (int j = 0; j < i; j++) { BUS_RESUME_CHILD(device_get_parent(devs[j]), devs[j]); } goto cleanup; } } cleanup: bhnd_bus_free_children(devs); return (error); } /** * Default bhnd(4) bus driver implementation of BHND_BUS_GET_PROBE_ORDER(). * * This implementation determines probe ordering based on the device's class * and other properties, including whether the device is serving as a host * bridge. 
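The probe-order values returned by bhnd_generic_get_probe_order() below compose a coarse band constant (BHND_PROBE_ROOT, BHND_PROBE_BUS, BHND_PROBE_CPU, BHND_PROBE_RESOURCE, ...) with a fine intra-band offset (BHND_PROBE_ORDER_FIRST through BHND_PROBE_ORDER_LAST), both defined in bhnd.h. A sketch of the resulting ordering (relationships only; the numeric constants are an implementation detail):

/* ChipCommon sorts before the PMU, which sorts before any SoC bridge core:
 *   BHND_PROBE_ROOT + BHND_PROBE_ORDER_FIRST
 *     < BHND_PROBE_BUS + BHND_PROBE_ORDER_EARLY
 *     < BHND_PROBE_BUS + BHND_PROBE_ORDER_LAST
 */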
*/ int bhnd_generic_get_probe_order(device_t dev, device_t child) { switch (bhnd_get_class(child)) { case BHND_DEVCLASS_CC: /* Must be early enough to provide NVRAM access to the * host bridge */ return (BHND_PROBE_ROOT + BHND_PROBE_ORDER_FIRST); case BHND_DEVCLASS_CC_B: /* fall through */ case BHND_DEVCLASS_PMU: return (BHND_PROBE_BUS + BHND_PROBE_ORDER_EARLY); case BHND_DEVCLASS_SOC_ROUTER: return (BHND_PROBE_BUS + BHND_PROBE_ORDER_LATE); case BHND_DEVCLASS_SOC_BRIDGE: return (BHND_PROBE_BUS + BHND_PROBE_ORDER_LAST); case BHND_DEVCLASS_CPU: return (BHND_PROBE_CPU + BHND_PROBE_ORDER_FIRST); case BHND_DEVCLASS_RAM: /* fall through */ case BHND_DEVCLASS_MEMC: return (BHND_PROBE_CPU + BHND_PROBE_ORDER_EARLY); case BHND_DEVCLASS_NVRAM: return (BHND_PROBE_RESOURCE + BHND_PROBE_ORDER_EARLY); case BHND_DEVCLASS_PCI: case BHND_DEVCLASS_PCIE: case BHND_DEVCLASS_PCCARD: case BHND_DEVCLASS_ENET: case BHND_DEVCLASS_ENET_MAC: case BHND_DEVCLASS_ENET_PHY: case BHND_DEVCLASS_WLAN: case BHND_DEVCLASS_WLAN_MAC: case BHND_DEVCLASS_WLAN_PHY: case BHND_DEVCLASS_EROM: case BHND_DEVCLASS_OTHER: case BHND_DEVCLASS_INVALID: if (bhnd_bus_find_hostb_device(dev) == child) return (BHND_PROBE_ROOT + BHND_PROBE_ORDER_EARLY); return (BHND_PROBE_DEFAULT); default: return (BHND_PROBE_DEFAULT); } } /** * Default bhnd(4) bus driver implementation of BHND_BUS_ALLOC_PMU(). */ int bhnd_generic_alloc_pmu(device_t dev, device_t child) { struct bhnd_softc *sc; struct bhnd_resource *r; struct bhnd_core_clkctl *clkctl; struct resource_list *rl; struct resource_list_entry *rle; device_t pmu_dev; bhnd_addr_t r_addr; bhnd_size_t r_size; bus_size_t pmu_regs; u_int max_latency; int error; GIANT_REQUIRED; /* for newbus */ if (device_get_parent(child) != dev) return (EINVAL); sc = device_get_softc(dev); clkctl = bhnd_get_pmu_info(child); pmu_regs = BHND_CLK_CTL_ST; /* already allocated? */ if (clkctl != NULL) { panic("duplicate PMU allocation for %s", device_get_nameunit(child)); } /* Determine address+size of the core's PMU register block */ error = bhnd_get_region_addr(child, BHND_PORT_DEVICE, 0, 0, &r_addr, &r_size); if (error) { device_printf(sc->dev, "error fetching register block info for " "%s: %d\n", device_get_nameunit(child), error); return (error); } if (r_size < (pmu_regs + sizeof(uint32_t))) { device_printf(sc->dev, "pmu offset %#jx would overrun %s " "register block\n", (uintmax_t)pmu_regs, device_get_nameunit(child)); return (ENODEV); } /* Locate actual resource containing the core's register block */ if ((rl = BUS_GET_RESOURCE_LIST(dev, child)) == NULL) { device_printf(dev, "NULL resource list returned for %s\n", device_get_nameunit(child)); return (ENXIO); } if ((rle = resource_list_find(rl, SYS_RES_MEMORY, 0)) == NULL) { device_printf(dev, "cannot locate core register resource " "for %s\n", device_get_nameunit(child)); return (ENXIO); } if (rle->res == NULL) { device_printf(dev, "core register resource unallocated for " "%s\n", device_get_nameunit(child)); return (ENXIO); } if (r_addr+pmu_regs < rman_get_start(rle->res) || r_addr+pmu_regs >= rman_get_end(rle->res)) { device_printf(dev, "core register resource does not map PMU " "registers at %#jx for %s\n", r_addr+pmu_regs, device_get_nameunit(child)); return (ENXIO); } /* Adjust PMU register offset relative to the actual start address * of the core's register block allocation. * * XXX: The saved offset will be invalid if bus_adjust_resource is * used to modify the resource's start address.
*/ if (rman_get_start(rle->res) > r_addr) pmu_regs -= rman_get_start(rle->res) - r_addr; else pmu_regs += r_addr - rman_get_start(rle->res); /* Retain a PMU reference for the clkctl instance state */ pmu_dev = bhnd_retain_provider(child, BHND_SERVICE_PMU); if (pmu_dev == NULL) { device_printf(sc->dev, "PMU not found\n"); return (ENXIO); } /* Fetch the maximum transition latency from our PMU */ max_latency = bhnd_pmu_get_max_transition_latency(pmu_dev); /* Allocate a new bhnd_resource wrapping the standard resource we * fetched from the resource list; we'll free this in * bhnd_generic_release_pmu() */ r = malloc(sizeof(struct bhnd_resource), M_BHND, M_NOWAIT); if (r == NULL) { bhnd_release_provider(child, pmu_dev, BHND_SERVICE_PMU); return (ENOMEM); } r->res = rle->res; r->direct = ((rman_get_flags(rle->res) & RF_ACTIVE) != 0); /* Allocate the clkctl instance */ clkctl = bhnd_alloc_core_clkctl(child, pmu_dev, r, pmu_regs, max_latency); if (clkctl == NULL) { free(r, M_BHND); bhnd_release_provider(child, pmu_dev, BHND_SERVICE_PMU); return (ENOMEM); } bhnd_set_pmu_info(child, clkctl); return (0); } /** * Default bhnd(4) bus driver implementation of BHND_BUS_RELEASE_PMU(). */ int bhnd_generic_release_pmu(device_t dev, device_t child) { struct bhnd_softc *sc; struct bhnd_core_clkctl *clkctl; struct bhnd_resource *r; device_t pmu_dev; GIANT_REQUIRED; /* for newbus */ sc = device_get_softc(dev); if (device_get_parent(child) != dev) return (EINVAL); clkctl = bhnd_get_pmu_info(child); if (clkctl == NULL) panic("pmu over-release for %s", device_get_nameunit(child)); /* Clear all FORCE, AREQ, and ERSRC flags, unless we're already in * RESET. Suspending a core clears clkctl automatically (and attempting * to access the PMU registers in a suspended core will trigger a * system livelock). */ if (!bhnd_is_hw_suspended(clkctl->cc_dev)) { BHND_CLKCTL_LOCK(clkctl); /* Clear all FORCE, AREQ, and ERSRC flags */ BHND_CLKCTL_SET_4(clkctl, 0x0, BHND_CCS_FORCE_MASK | BHND_CCS_AREQ_MASK | BHND_CCS_ERSRC_REQ_MASK); BHND_CLKCTL_UNLOCK(clkctl); } /* Clear child's PMU info reference */ bhnd_set_pmu_info(child, NULL); /* Before freeing the clkctl instance, save a pointer to resources we * need to clean up manually */ r = clkctl->cc_res; pmu_dev = clkctl->cc_pmu_dev; /* Free the clkctl instance */ bhnd_free_core_clkctl(clkctl); /* Free the child's bhnd resource wrapper */ free(r, M_BHND); /* Release the child's PMU provider reference */ bhnd_release_provider(child, pmu_dev, BHND_SERVICE_PMU); return (0); } /** * Default bhnd(4) bus driver implementation of BHND_BUS_GET_CLOCK_LATENCY(). */ int bhnd_generic_get_clock_latency(device_t dev, device_t child, bhnd_clock clock, u_int *latency) { struct bhnd_core_clkctl *clkctl; if (device_get_parent(child) != dev) return (EINVAL); if ((clkctl = bhnd_get_pmu_info(child)) == NULL) panic("no active PMU allocation"); return (bhnd_pmu_get_clock_latency(clkctl->cc_pmu_dev, clock, latency)); } /** * Default bhnd(4) bus driver implementation of BHND_BUS_GET_CLOCK_FREQ(). */ int bhnd_generic_get_clock_freq(device_t dev, device_t child, bhnd_clock clock, u_int *freq) { struct bhnd_core_clkctl *clkctl; if (device_get_parent(child) != dev) return (EINVAL); if ((clkctl = bhnd_get_pmu_info(child)) == NULL) panic("no active PMU allocation"); return (bhnd_pmu_get_clock_freq(clkctl->cc_pmu_dev, clock, freq)); } /** * Default bhnd(4) bus driver implementation of BHND_BUS_REQUEST_CLOCK().
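A worked example of the offset adjustment in bhnd_generic_alloc_pmu() above, with illustrative addresses: if the core's register block lives at r_addr = 0x18001000 but the backing resource was allocated starting at 0x18000000, the resource start lies below r_addr and pmu_regs grows by 0x1000, so clkctl accesses at offset pmu_regs land on the physical register at r_addr + BHND_CLK_CTL_ST; conversely, a resource starting above r_addr shrinks the offset by the difference.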
*/ int bhnd_generic_request_clock(device_t dev, device_t child, bhnd_clock clock) { struct bhnd_softc *sc; struct bhnd_core_clkctl *clkctl; uint32_t avail; uint32_t req; int error; sc = device_get_softc(dev); if (device_get_parent(child) != dev) return (EINVAL); if ((clkctl = bhnd_get_pmu_info(child)) == NULL) panic("no active PMU allocation"); BHND_ASSERT_CLKCTL_AVAIL(clkctl); avail = 0x0; req = 0x0; switch (clock) { case BHND_CLOCK_DYN: break; case BHND_CLOCK_ILP: req |= BHND_CCS_FORCEILP; break; case BHND_CLOCK_ALP: req |= BHND_CCS_FORCEALP; avail |= BHND_CCS_ALPAVAIL; break; case BHND_CLOCK_HT: req |= BHND_CCS_FORCEHT; avail |= BHND_CCS_HTAVAIL; break; default: device_printf(dev, "%s requested unknown clock: %#x\n", device_get_nameunit(clkctl->cc_dev), clock); return (ENODEV); } BHND_CLKCTL_LOCK(clkctl); /* Issue request */ BHND_CLKCTL_SET_4(clkctl, req, BHND_CCS_FORCE_MASK); /* Wait for clock availability */ error = bhnd_core_clkctl_wait(clkctl, avail, avail); BHND_CLKCTL_UNLOCK(clkctl); return (error); } /** * Default bhnd(4) bus driver implementation of BHND_BUS_ENABLE_CLOCKS(). */ int bhnd_generic_enable_clocks(device_t dev, device_t child, uint32_t clocks) { struct bhnd_core_clkctl *clkctl; uint32_t avail; uint32_t req; int error; if (device_get_parent(child) != dev) return (EINVAL); if ((clkctl = bhnd_get_pmu_info(child)) == NULL) panic("no active PMU allocation"); BHND_ASSERT_CLKCTL_AVAIL(clkctl); avail = 0x0; req = 0x0; /* Build clock request flags */ if (clocks & BHND_CLOCK_DYN) /* nothing to enable */ clocks &= ~BHND_CLOCK_DYN; if (clocks & BHND_CLOCK_ILP) /* nothing to enable */ clocks &= ~BHND_CLOCK_ILP; if (clocks & BHND_CLOCK_ALP) { req |= BHND_CCS_ALPAREQ; avail |= BHND_CCS_ALPAVAIL; clocks &= ~BHND_CLOCK_ALP; } if (clocks & BHND_CLOCK_HT) { req |= BHND_CCS_HTAREQ; avail |= BHND_CCS_HTAVAIL; clocks &= ~BHND_CLOCK_HT; } /* Check for unknown clock values */ if (clocks != 0x0) { device_printf(dev, "%s requested unknown clocks: %#x\n", device_get_nameunit(clkctl->cc_dev), clocks); return (ENODEV); } BHND_CLKCTL_LOCK(clkctl); /* Issue request */ BHND_CLKCTL_SET_4(clkctl, req, BHND_CCS_AREQ_MASK); /* Wait for clock availability */ error = bhnd_core_clkctl_wait(clkctl, avail, avail); BHND_CLKCTL_UNLOCK(clkctl); return (error); } /** * Default bhnd(4) bus driver implementation of BHND_BUS_REQUEST_EXT_RSRC(). */ int bhnd_generic_request_ext_rsrc(device_t dev, device_t child, u_int rsrc) { struct bhnd_softc *sc; struct bhnd_core_clkctl *clkctl; uint32_t req; uint32_t avail; int error; sc = device_get_softc(dev); if (device_get_parent(child) != dev) return (EINVAL); if ((clkctl = bhnd_get_pmu_info(child)) == NULL) panic("no active PMU allocation"); BHND_ASSERT_CLKCTL_AVAIL(clkctl); sc = device_get_softc(dev); if (rsrc > BHND_CCS_ERSRC_MAX) return (EINVAL); req = BHND_CCS_SET_BITS((1<<rsrc), BHND_CCS_ERSRC_REQ); avail = BHND_CCS_SET_BITS((1<<rsrc), BHND_CCS_ERSRC_STS); BHND_CLKCTL_LOCK(clkctl); /* Issue request */ BHND_CLKCTL_SET_4(clkctl, req, BHND_CCS_ERSRC_REQ_MASK); /* Wait for resource availability */ error = bhnd_core_clkctl_wait(clkctl, avail, avail); BHND_CLKCTL_UNLOCK(clkctl); return (error); } /** * Default bhnd(4) bus driver implementation of BHND_BUS_RELEASE_EXT_RSRC(). */ int bhnd_generic_release_ext_rsrc(device_t dev, device_t child, u_int rsrc) { struct bhnd_softc *sc; struct bhnd_core_clkctl *clkctl; uint32_t mask; sc = device_get_softc(dev); if (device_get_parent(child) != dev) return (EINVAL); if ((clkctl = bhnd_get_pmu_info(child)) == NULL) panic("no active PMU allocation"); BHND_ASSERT_CLKCTL_AVAIL(clkctl); if (rsrc > BHND_CCS_ERSRC_MAX) return (EINVAL); mask = BHND_CCS_SET_BITS((1<<rsrc), BHND_CCS_ERSRC_REQ); /* Clear request */ BHND_CLKCTL_LOCK(clkctl); BHND_CLKCTL_SET_4(clkctl, 0x0, mask); BHND_CLKCTL_UNLOCK(clkctl); return (0); } /** * Default bhnd(4) bus driver implementation of BHND_BUS_IS_REGION_VALID(). */ static bool bhnd_generic_is_region_valid(device_t dev, device_t child, bhnd_port_type type, u_int port, u_int region) { if (port >= bhnd_get_port_count(child, type)) return (false); if (region >= bhnd_get_region_count(child, type, port)) return (false); return (true); } /** * Default bhnd(4) bus driver implementation of BHND_BUS_GET_NVRAM_VAR(). * * This implementation searches @p dev for a registered NVRAM child device. * * If no NVRAM device is registered with @p dev, the request is delegated to * the BHND_BUS_GET_NVRAM_VAR() method on the parent of @p dev.
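Both clock request paths above follow the same clkctl protocol: set the request bits under the clkctl lock, then spin until the matching availability bits assert. A condensed sketch for a single BHND_CLOCK_HT request, using only identifiers that appear in this file:

BHND_CLKCTL_LOCK(clkctl);
/* Force the HT clock source ... */
BHND_CLKCTL_SET_4(clkctl, BHND_CCS_FORCEHT, BHND_CCS_FORCE_MASK);
/* ... and poll until the availability bit asserts */
error = bhnd_core_clkctl_wait(clkctl, BHND_CCS_HTAVAIL, BHND_CCS_HTAVAIL);
BHND_CLKCTL_UNLOCK(clkctl);

The wait is bounded by the PMU's maximum transition latency captured in bhnd_generic_alloc_pmu().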
*/ int bhnd_generic_get_nvram_var(device_t dev, device_t child, const char *name, void *buf, size_t *size, bhnd_nvram_type type) { struct bhnd_softc *sc; device_t nvram, parent; int error; sc = device_get_softc(dev); /* If a NVRAM device is available, consult it first */ nvram = bhnd_retain_provider(child, BHND_SERVICE_NVRAM); if (nvram != NULL) { error = BHND_NVRAM_GETVAR(nvram, name, buf, size, type); bhnd_release_provider(child, nvram, BHND_SERVICE_NVRAM); return (error); } /* Otherwise, try to delegate to parent */ if ((parent = device_get_parent(dev)) == NULL) return (ENODEV); return (BHND_BUS_GET_NVRAM_VAR(device_get_parent(dev), child, name, buf, size, type)); } /** * Default bhnd(4) bus driver implementation of BUS_PRINT_CHILD(). * * This implementation requests the device's struct resource_list via * BUS_GET_RESOURCE_LIST. */ int bhnd_generic_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%#jd"); } retval += printf(" at core %u", bhnd_get_core_index(child)); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } /** * Default bhnd(4) bus driver implementation of BUS_PROBE_NOMATCH(). * * This implementation requests the device's struct resource_list via * BUS_GET_RESOURCE_LIST. */ void bhnd_generic_probe_nomatch(device_t dev, device_t child) { struct resource_list *rl; const struct bhnd_nomatch *nm; bool report; /* Fetch reporting configuration for this device */ report = true; for (nm = bhnd_nomatch_table; nm->device != BHND_COREID_INVALID; nm++) { if (nm->vendor != bhnd_get_vendor(child)) continue; if (nm->device != bhnd_get_device(child)) continue; report = false; if (bootverbose && nm->if_verbose) report = true; break; } if (!report) return; /* Print the non-matched device info */ device_printf(dev, "<%s %s, rev %hhu>", bhnd_get_vendor_name(child), bhnd_get_device_name(child), bhnd_get_hwrev(child)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%#jd"); } printf(" at core %u (no driver attached)\n", bhnd_get_core_index(child)); } -/** - * Default implementation of BUS_CHILD_PNPINFO_STR(). - */ static int -bhnd_child_pnpinfo_str(device_t dev, device_t child, char *buf, - size_t buflen) +bhnd_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb) { - if (device_get_parent(child) != dev) { - return (BUS_CHILD_PNPINFO_STR(device_get_parent(dev), child, - buf, buflen)); - } + if (device_get_parent(child) != dev) + return (BUS_CHILD_PNPINFO(device_get_parent(dev), child, sb)); - snprintf(buf, buflen, "vendor=0x%hx device=0x%hx rev=0x%hhx", + sbuf_printf(sb, "vendor=0x%hx device=0x%hx rev=0x%hhx", bhnd_get_vendor(child), bhnd_get_device(child), bhnd_get_hwrev(child)); return (0); } -/** - * Default implementation of BUS_CHILD_LOCATION_STR(). 
- */ static int -bhnd_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) +bhnd_child_location(device_t dev, device_t child, struct sbuf *sb) { bhnd_addr_t addr; bhnd_size_t size; - if (device_get_parent(child) != dev) { - return (BUS_CHILD_LOCATION_STR(device_get_parent(dev), child, - buf, buflen)); - } + if (device_get_parent(child) != dev) + return (BUS_CHILD_LOCATION(device_get_parent(dev), child, sb)); - if (bhnd_get_region_addr(child, BHND_PORT_DEVICE, 0, 0, &addr, &size)) { - /* No device default port/region */ - if (buflen > 0) - *buf = '\0'; + if (bhnd_get_region_addr(child, BHND_PORT_DEVICE, 0, 0, &addr, &size)) return (0); - } - snprintf(buf, buflen, "port0.0=0x%llx", (unsigned long long) addr); + sbuf_printf(sb, "port0.0=0x%llx", (unsigned long long) addr); return (0); } /** * Default bhnd(4) bus driver implementation of BUS_CHILD_DELETED(). * * This implementation manages internal bhnd(4) state, and must be called * by subclassing drivers. */ void bhnd_generic_child_deleted(device_t dev, device_t child) { struct bhnd_softc *sc; sc = device_get_softc(dev); /* Free device info */ if (bhnd_get_pmu_info(child) != NULL) { /* Releasing PMU requests automatically would be nice, * but we can't reference per-core PMU register * resource after driver detach */ panic("%s leaked device pmu state\n", device_get_nameunit(child)); } } /** * Helper function for implementing BUS_SUSPEND_CHILD(). * * TODO: Power management * * If @p child is not a direct child of @p dev, suspension is delegated to * the @p dev parent. */ int bhnd_generic_suspend_child(device_t dev, device_t child) { if (device_get_parent(child) != dev) BUS_SUSPEND_CHILD(device_get_parent(dev), child); return bus_generic_suspend_child(dev, child); } /** * Helper function for implementing BUS_RESUME_CHILD(). * * TODO: Power management * * If @p child is not a direct child of @p dev, resumption is delegated to * the @p dev parent. */ int bhnd_generic_resume_child(device_t dev, device_t child) { if (device_get_parent(child) != dev) BUS_RESUME_CHILD(device_get_parent(dev), child); return bus_generic_resume_child(dev, child); } /** * Default bhnd(4) bus driver implementation of BUS_SETUP_INTR(). * * This implementation of BUS_SETUP_INTR() will delegate interrupt setup * to the parent of @p dev, if any. */ int bhnd_generic_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { return (bus_generic_setup_intr(dev, child, irq, flags, filter, intr, arg, cookiep)); } /* * Delegate all indirect I/O to the parent device. When inherited by * non-bridged bus implementations, resources will never be marked as * indirect, and these methods will never be called.
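The sbuf-based methods above no longer need the truncation and empty-buffer handling of the _str variants. The formatting itself can be exercised from userland against libsbuf; a minimal, runnable sketch with hypothetical vendor/device/revision values (build with: cc demo.c -lsbuf):

#include <sys/sbuf.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct sbuf *sb;

	/* Build a pnpinfo-style key=value string, as bhnd_child_pnpinfo()
	 * does above; the values here are made up for illustration. */
	if ((sb = sbuf_new_auto()) == NULL)
		return (EXIT_FAILURE);
	sbuf_printf(sb, "vendor=0x%hx device=0x%hx rev=0x%hhx",
	    (unsigned short)0x4bf, (unsigned short)0x812,
	    (unsigned char)0x2a);
	if (sbuf_finish(sb) != 0)
		return (EXIT_FAILURE);
	printf("%s\n", sbuf_data(sb));
	sbuf_delete(sb);
	return (EXIT_SUCCESS);
}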
*/ #define BHND_IO_READ(_type, _name, _method) \ static _type \ bhnd_read_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset) \ { \ return (BHND_BUS_READ_ ## _method( \ device_get_parent(dev), child, r, offset)); \ } #define BHND_IO_WRITE(_type, _name, _method) \ static void \ bhnd_write_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset, _type value) \ { \ return (BHND_BUS_WRITE_ ## _method( \ device_get_parent(dev), child, r, offset, \ value)); \ } #define BHND_IO_MISC(_type, _op, _method) \ static void \ bhnd_ ## _op (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset, _type datap, \ bus_size_t count) \ { \ BHND_BUS_ ## _method(device_get_parent(dev), child, r, \ offset, datap, count); \ } #define BHND_IO_METHODS(_type, _size) \ BHND_IO_READ(_type, _size, _size) \ BHND_IO_WRITE(_type, _size, _size) \ \ BHND_IO_READ(_type, stream_ ## _size, STREAM_ ## _size) \ BHND_IO_WRITE(_type, stream_ ## _size, STREAM_ ## _size) \ \ BHND_IO_MISC(_type*, read_multi_ ## _size, \ READ_MULTI_ ## _size) \ BHND_IO_MISC(_type*, write_multi_ ## _size, \ WRITE_MULTI_ ## _size) \ \ BHND_IO_MISC(_type*, read_multi_stream_ ## _size, \ READ_MULTI_STREAM_ ## _size) \ BHND_IO_MISC(_type*, write_multi_stream_ ## _size, \ WRITE_MULTI_STREAM_ ## _size) \ \ BHND_IO_MISC(_type, set_multi_ ## _size, SET_MULTI_ ## _size) \ BHND_IO_MISC(_type, set_region_ ## _size, SET_REGION_ ## _size) \ \ BHND_IO_MISC(_type*, read_region_ ## _size, \ READ_REGION_ ## _size) \ BHND_IO_MISC(_type*, write_region_ ## _size, \ WRITE_REGION_ ## _size) \ \ BHND_IO_MISC(_type*, read_region_stream_ ## _size, \ READ_REGION_STREAM_ ## _size) \ BHND_IO_MISC(_type*, write_region_stream_ ## _size, \ WRITE_REGION_STREAM_ ## _size) \ BHND_IO_METHODS(uint8_t, 1); BHND_IO_METHODS(uint16_t, 2); BHND_IO_METHODS(uint32_t, 4); static void bhnd_barrier(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, bus_size_t length, int flags) { BHND_BUS_BARRIER(device_get_parent(dev), child, r, offset, length, flags); } static device_method_t bhnd_methods[] = { /* Device interface */ \ DEVMETHOD(device_attach, bhnd_generic_attach), DEVMETHOD(device_detach, bhnd_generic_detach), DEVMETHOD(device_shutdown, bhnd_generic_shutdown), DEVMETHOD(device_suspend, bhnd_generic_suspend), DEVMETHOD(device_resume, bhnd_generic_resume), /* Bus interface */ DEVMETHOD(bus_child_deleted, bhnd_generic_child_deleted), DEVMETHOD(bus_probe_nomatch, bhnd_generic_probe_nomatch), DEVMETHOD(bus_print_child, bhnd_generic_print_child), - DEVMETHOD(bus_child_pnpinfo_str, bhnd_child_pnpinfo_str), - DEVMETHOD(bus_child_location_str, bhnd_child_location_str), + DEVMETHOD(bus_child_pnpinfo, bhnd_child_pnpinfo), + DEVMETHOD(bus_child_location, bhnd_child_location), DEVMETHOD(bus_suspend_child, bhnd_generic_suspend_child), DEVMETHOD(bus_resume_child, bhnd_generic_resume_child), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bhnd_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), 
DEVMETHOD(bus_config_intr, bus_generic_config_intr), DEVMETHOD(bus_bind_intr, bus_generic_bind_intr), DEVMETHOD(bus_describe_intr, bus_generic_describe_intr), DEVMETHOD(bus_get_dma_tag, bus_generic_get_dma_tag), /* BHND interface */ DEVMETHOD(bhnd_bus_get_chipid, bhnd_bus_generic_get_chipid), DEVMETHOD(bhnd_bus_is_hw_disabled, bhnd_bus_generic_is_hw_disabled), DEVMETHOD(bhnd_bus_get_probe_order, bhnd_generic_get_probe_order), DEVMETHOD(bhnd_bus_alloc_pmu, bhnd_generic_alloc_pmu), DEVMETHOD(bhnd_bus_release_pmu, bhnd_generic_release_pmu), DEVMETHOD(bhnd_bus_request_clock, bhnd_generic_request_clock), DEVMETHOD(bhnd_bus_enable_clocks, bhnd_generic_enable_clocks), DEVMETHOD(bhnd_bus_request_ext_rsrc, bhnd_generic_request_ext_rsrc), DEVMETHOD(bhnd_bus_release_ext_rsrc, bhnd_generic_release_ext_rsrc), DEVMETHOD(bhnd_bus_get_clock_latency, bhnd_generic_get_clock_latency), DEVMETHOD(bhnd_bus_get_clock_freq, bhnd_generic_get_clock_freq), DEVMETHOD(bhnd_bus_is_region_valid, bhnd_generic_is_region_valid), DEVMETHOD(bhnd_bus_get_nvram_var, bhnd_generic_get_nvram_var), /* BHND interface (bus I/O) */ DEVMETHOD(bhnd_bus_read_1, bhnd_read_1), DEVMETHOD(bhnd_bus_read_2, bhnd_read_2), DEVMETHOD(bhnd_bus_read_4, bhnd_read_4), DEVMETHOD(bhnd_bus_write_1, bhnd_write_1), DEVMETHOD(bhnd_bus_write_2, bhnd_write_2), DEVMETHOD(bhnd_bus_write_4, bhnd_write_4), DEVMETHOD(bhnd_bus_read_stream_1, bhnd_read_stream_1), DEVMETHOD(bhnd_bus_read_stream_2, bhnd_read_stream_2), DEVMETHOD(bhnd_bus_read_stream_4, bhnd_read_stream_4), DEVMETHOD(bhnd_bus_write_stream_1, bhnd_write_stream_1), DEVMETHOD(bhnd_bus_write_stream_2, bhnd_write_stream_2), DEVMETHOD(bhnd_bus_write_stream_4, bhnd_write_stream_4), DEVMETHOD(bhnd_bus_read_multi_1, bhnd_read_multi_1), DEVMETHOD(bhnd_bus_read_multi_2, bhnd_read_multi_2), DEVMETHOD(bhnd_bus_read_multi_4, bhnd_read_multi_4), DEVMETHOD(bhnd_bus_write_multi_1, bhnd_write_multi_1), DEVMETHOD(bhnd_bus_write_multi_2, bhnd_write_multi_2), DEVMETHOD(bhnd_bus_write_multi_4, bhnd_write_multi_4), DEVMETHOD(bhnd_bus_read_multi_stream_1, bhnd_read_multi_stream_1), DEVMETHOD(bhnd_bus_read_multi_stream_2, bhnd_read_multi_stream_2), DEVMETHOD(bhnd_bus_read_multi_stream_4, bhnd_read_multi_stream_4), DEVMETHOD(bhnd_bus_write_multi_stream_1,bhnd_write_multi_stream_1), DEVMETHOD(bhnd_bus_write_multi_stream_2,bhnd_write_multi_stream_2), DEVMETHOD(bhnd_bus_write_multi_stream_4,bhnd_write_multi_stream_4), DEVMETHOD(bhnd_bus_set_multi_1, bhnd_set_multi_1), DEVMETHOD(bhnd_bus_set_multi_2, bhnd_set_multi_2), DEVMETHOD(bhnd_bus_set_multi_4, bhnd_set_multi_4), DEVMETHOD(bhnd_bus_set_region_1, bhnd_set_region_1), DEVMETHOD(bhnd_bus_set_region_2, bhnd_set_region_2), DEVMETHOD(bhnd_bus_set_region_4, bhnd_set_region_4), DEVMETHOD(bhnd_bus_read_region_1, bhnd_read_region_1), DEVMETHOD(bhnd_bus_read_region_2, bhnd_read_region_2), DEVMETHOD(bhnd_bus_read_region_4, bhnd_read_region_4), DEVMETHOD(bhnd_bus_write_region_1, bhnd_write_region_1), DEVMETHOD(bhnd_bus_write_region_2, bhnd_write_region_2), DEVMETHOD(bhnd_bus_write_region_4, bhnd_write_region_4), DEVMETHOD(bhnd_bus_read_region_stream_1,bhnd_read_region_stream_1), DEVMETHOD(bhnd_bus_read_region_stream_2,bhnd_read_region_stream_2), DEVMETHOD(bhnd_bus_read_region_stream_4,bhnd_read_region_stream_4), DEVMETHOD(bhnd_bus_write_region_stream_1, bhnd_write_region_stream_1), DEVMETHOD(bhnd_bus_write_region_stream_2, bhnd_write_region_stream_2), DEVMETHOD(bhnd_bus_write_region_stream_4, bhnd_write_region_stream_4), DEVMETHOD(bhnd_bus_barrier, bhnd_barrier), DEVMETHOD_END }; 
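For reference, the hand-expanded form of one wrapper generated by the BHND_IO_* macros above; BHND_IO_READ(uint32_t, 4, 4) produces:

static uint32_t
bhnd_read_4(device_t dev, device_t child, struct bhnd_resource *r,
    bus_size_t offset)
{
	return (BHND_BUS_READ_4(device_get_parent(dev), child, r, offset));
}

Every generated indirect I/O method simply forwards to the bridge parent, which owns the register window mapping.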
devclass_t bhnd_devclass; /**< bhnd bus. */ devclass_t bhnd_hostb_devclass; /**< bhnd bus host bridge. */ devclass_t bhnd_nvram_devclass; /**< bhnd NVRAM device */ DEFINE_CLASS_0(bhnd, bhnd_driver, bhnd_methods, sizeof(struct bhnd_softc)); MODULE_VERSION(bhnd, 1); diff --git a/sys/dev/bhnd/bhndb/bhndb.c b/sys/dev/bhnd/bhndb/bhndb.c index c90ed9abf671..9a2beb50e9bb 100644 --- a/sys/dev/bhnd/bhndb/bhndb.c +++ b/sys/dev/bhnd/bhndb/bhndb.c @@ -1,2327 +1,2318 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Abstract BHND Bridge Device Driver * * Provides generic support for bridging from a parent bus (such as PCI) to * a BHND-compatible bus (e.g. bcma or siba). 
*/ #include #include #include #include +#include <sys/sbuf.h> #include #include #include #include #include #include #include #include #include #include "bhnd_chipc_if.h" #include "bhnd_nvram_if.h" #include "bhndbvar.h" #include "bhndb_bus_if.h" #include "bhndb_hwdata.h" #include "bhndb_private.h" /* Debugging flags */ static u_long bhndb_debug = 0; TUNABLE_ULONG("hw.bhndb.debug", &bhndb_debug); enum { BHNDB_DEBUG_PRIO = 1 << 0, }; #define BHNDB_DEBUG(_type) (BHNDB_DEBUG_ ## _type & bhndb_debug) static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw); static int bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *r, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table); static int bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw **hw); bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child); static struct rman *bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type); static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size); static int bhndb_activate_static_region( struct bhndb_softc *sc, struct bhndb_region *region, device_t child, int type, int rid, struct resource *r); static int bhndb_try_activate_resource( struct bhndb_softc *sc, device_t child, int type, int rid, struct resource *r, bool *indirect); static inline struct bhndb_dw_alloc *bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset, bool *stolen, bus_addr_t *restore); /** * Default bhndb(4) implementation of DEVICE_PROBE(). * * This function provides the default bhndb implementation of DEVICE_PROBE(), * and is compatible with bhndb(4) bridges attached via bhndb_attach_bridge(). */ int bhndb_generic_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static void bhndb_probe_nomatch(device_t dev, device_t child) { const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> (no driver attached)\n", name); } static int bhndb_print_child(device_t dev, device_t child) { struct bhndb_softc *sc; struct resource_list *rl; int retval = 0; sc = device_get_softc(dev); retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static int -bhndb_child_pnpinfo_str(device_t bus, device_t child, char *buf, - size_t buflen) -{ - *buf = '\0'; - return (0); -} - -static int -bhndb_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) +bhndb_child_location(device_t dev, device_t child, struct sbuf *sb) { struct bhndb_softc *sc; sc = device_get_softc(dev); - snprintf(buf, buflen, "base=0x%llx", + sbuf_printf(sb, "base=0x%llx", (unsigned long long) sc->chipid.enum_addr); return (0); } /** * Return true if @p cores matches the @p hw specification. * * @param sc BHNDB device state. * @param cores A device table to match against. * @param ncores The number of cores in @p cores. * @param hw The hardware description to be matched against.
*/ static bool bhndb_hw_matches(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw *hw) { for (u_int i = 0; i < hw->num_hw_reqs; i++) { const struct bhnd_core_match *match; bool found; match = &hw->hw_reqs[i]; found = false; for (u_int d = 0; d < ncores; d++) { struct bhnd_core_info *core = &cores[d]; if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core)) continue; if (!bhnd_core_matches(core, match)) continue; found = true; break; } if (!found) return (false); } return (true); } /** * Initialize the region maps and priority configuration in @p br using * the priority @p table and the set of cores enumerated by @p erom. * * @param sc The bhndb device state. * @param br The resource state to be configured. * @param erom EROM parser used to enumerate @p cores. * @param cores All cores enumerated on the bridged bhnd bus. * @param ncores The length of @p cores. * @param table Hardware priority table to be used to determine the relative * priorities of per-core port resources. */ static int bhndb_init_region_cfg(struct bhndb_softc *sc, bhnd_erom_t *erom, struct bhndb_resources *br, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw_priority *table) { const struct bhndb_hw_priority *hp; bhnd_addr_t addr; bhnd_size_t size; size_t prio_low, prio_default, prio_high; int error; /* The number of port regions per priority band that must be accessible * via dynamic register windows */ prio_low = 0; prio_default = 0; prio_high = 0; /* * Register bridge regions covering all statically mapped ports. */ for (u_int i = 0; i < ncores; i++) { const struct bhndb_regwin *regw; struct bhnd_core_info *core; struct bhnd_core_match md; core = &cores[i]; md = bhnd_core_get_match_desc(core); for (regw = br->cfg->register_windows; regw->win_type != BHNDB_REGWIN_T_INVALID; regw++) { const struct bhndb_port_priority *pp; uint32_t alloc_flags; /* Only core windows are supported */ if (regw->win_type != BHNDB_REGWIN_T_CORE) continue; /* Skip non-matching cores. */ if (!bhndb_regwin_match_core(regw, core)) continue; /* Fetch the base address of the mapped port */ error = bhnd_erom_lookup_core_addr(erom, &md, regw->d.core.port_type, regw->d.core.port, regw->d.core.region, NULL, &addr, &size); if (error) { /* Skip non-applicable register windows */ if (error == ENOENT) continue; return (error); } /* * Apply the register window's region offset, if any. */ if (regw->d.core.offset > size) { device_printf(sc->dev, "invalid register " "window offset %#jx for region %#jx+%#jx\n", regw->d.core.offset, addr, size); return (EINVAL); } addr += regw->d.core.offset; /* * Always defer to the register window's size. * * If the port size is smaller than the window size, * this ensures that we fully utilize register windows * larger than the referenced port. * * If the port size is larger than the window size, this * ensures that we do not directly map the allocations * within the region to a too-small window. */ size = regw->win_size; /* Fetch allocation flags from the corresponding port * priority entry, if any */ pp = bhndb_hw_priorty_find_port(table, core, regw->d.core.port_type, regw->d.core.port, regw->d.core.region); if (pp != NULL) { alloc_flags = pp->alloc_flags; } else { alloc_flags = 0; } /* * Add to the bus region list. * * The window priority for a statically mapped region is * always HIGH. 
*/ error = bhndb_add_resource_region(br, addr, size, BHNDB_PRIORITY_HIGH, alloc_flags, regw); if (error) return (error); } } /* * Perform priority accounting and register bridge regions for all * ports defined in the priority table */ for (u_int i = 0; i < ncores; i++) { struct bhnd_core_info *core; struct bhnd_core_match md; core = &cores[i]; md = bhnd_core_get_match_desc(core); /* * Skip priority accounting for cores that ... */ /* ... do not require bridge resources */ if (BHNDB_IS_CORE_DISABLED(sc->dev, sc->bus_dev, core)) continue; /* ... do not have a priority table entry */ hp = bhndb_hw_priority_find_core(table, core); if (hp == NULL) continue; /* ... are explicitly disabled in the priority table. */ if (hp->priority == BHNDB_PRIORITY_NONE) continue; /* Determine the number of dynamic windows required and * register their bus_region entries. */ for (u_int i = 0; i < hp->num_ports; i++) { const struct bhndb_port_priority *pp; pp = &hp->ports[i]; /* Fetch the address+size of the mapped port. */ error = bhnd_erom_lookup_core_addr(erom, &md, pp->type, pp->port, pp->region, NULL, &addr, &size); if (error) { /* Skip ports not defined on this device */ if (error == ENOENT) continue; return (error); } /* Skip ports with an existing static mapping */ if (bhndb_has_static_region_mapping(br, addr, size)) continue; /* Define a dynamic region for this port */ error = bhndb_add_resource_region(br, addr, size, pp->priority, pp->alloc_flags, NULL); if (error) return (error); /* Update port mapping counts */ switch (pp->priority) { case BHNDB_PRIORITY_NONE: break; case BHNDB_PRIORITY_LOW: prio_low++; break; case BHNDB_PRIORITY_DEFAULT: prio_default++; break; case BHNDB_PRIORITY_HIGH: prio_high++; break; } } } /* Determine the minimum priority at which we'll allocate direct * register windows from our dynamic pool */ size_t prio_total = prio_low + prio_default + prio_high; if (prio_total <= br->dwa_count) { /* low+default+high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_LOW; } else if (prio_default + prio_high <= br->dwa_count) { /* default+high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_DEFAULT; } else { /* high priority regions get windows */ br->min_prio = BHNDB_PRIORITY_HIGH; } if (BHNDB_DEBUG(PRIO)) { struct bhndb_region *region; const char *direct_msg, *type_msg; bhndb_priority_t prio, prio_min; uint32_t flags; prio_min = br->min_prio; device_printf(sc->dev, "min_prio: %d\n", prio_min); STAILQ_FOREACH(region, &br->bus_regions, link) { prio = region->priority; flags = region->alloc_flags; direct_msg = prio >= prio_min ? "direct" : "indirect"; type_msg = region->static_regwin ? "static" : "dynamic"; device_printf(sc->dev, "region 0x%llx+0x%llx priority " "%u %s/%s", (unsigned long long) region->addr, (unsigned long long) region->size, region->priority, direct_msg, type_msg); if (flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT) printf(" [overcommit]\n"); else printf("\n"); } } return (0); } /** * Find a hardware specification for @p dev. * * @param sc The bhndb device state. * @param cores All cores enumerated on the bridged bhnd bus. * @param ncores The length of @p cores. * @param[out] hw On success, the matched hardware specification. * with @p dev. * * @retval 0 success * @retval non-zero if an error occurs fetching device info for comparison. */ static int bhndb_find_hwspec(struct bhndb_softc *sc, struct bhnd_core_info *cores, u_int ncores, const struct bhndb_hw **hw) { const struct bhndb_hw *next, *hw_table; /* Search for the first matching hardware config. 
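A worked example of the window-priority threshold selection above, with illustrative counts: given br->dwa_count = 4 dynamic windows and accounting results prio_low = 2, prio_default = 3, prio_high = 1, the total of 6 exceeds 4 but prio_default + prio_high = 4 fits, so br->min_prio becomes BHNDB_PRIORITY_DEFAULT; default- and high-priority regions are then eligible for dedicated register windows, while low-priority regions fall back to indirect (window-stealing) I/O.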
*/ hw_table = BHNDB_BUS_GET_HARDWARE_TABLE(sc->parent_dev, sc->dev); for (next = hw_table; next->hw_reqs != NULL; next++) { if (!bhndb_hw_matches(sc, cores, ncores, next)) continue; /* Found */ *hw = next; return (0); } return (ENOENT); } /** * Helper function that must be called by subclass bhndb(4) drivers * when implementing DEVICE_ATTACH() before calling any bhnd(4) or bhndb(4) * APIs on the bridge device. * * This function will add a bridged bhnd(4) child device with a device order of * BHND_PROBE_BUS. Any subclass bhndb(4) driver may use the BHND_PROBE_* * priority bands to add additional devices that will be attached in * their preferred order relative to the bridged bhnd(4) bus. * * @param dev The bridge device to attach. * @param cid The bridged device's chip identification. * @param cores The bridged device's core table. * @param ncores The number of cores in @p cores. * @param bridge_core Core info for the bhnd(4) core serving as the host * bridge. * @param erom_class An erom parser class that may be used to parse * the bridged device's device enumeration table. */ int bhndb_attach(device_t dev, struct bhnd_chipid *cid, struct bhnd_core_info *cores, u_int ncores, struct bhnd_core_info *bridge_core, bhnd_erom_class_t *erom_class) { struct bhndb_devinfo *dinfo; struct bhndb_softc *sc; const struct bhndb_hw *hw; const struct bhndb_hwcfg *hwcfg; const struct bhndb_hw_priority *hwprio; struct bhnd_erom_io *eio; bhnd_erom_t *erom; int error; sc = device_get_softc(dev); sc->dev = dev; sc->parent_dev = device_get_parent(dev); sc->bridge_core = *bridge_core; sc->chipid = *cid; if ((error = bhnd_service_registry_init(&sc->services))) return (error); BHNDB_LOCK_INIT(sc); erom = NULL; /* Find a matching bridge hardware configuration */ if ((error = bhndb_find_hwspec(sc, cores, ncores, &hw))) { device_printf(sc->dev, "unable to identify device, " " using generic bridge resource definitions\n"); hwcfg = BHNDB_BUS_GET_GENERIC_HWCFG(sc->parent_dev, dev); hw = NULL; } else { hwcfg = hw->cfg; } if (hw != NULL && (bootverbose || BHNDB_DEBUG(PRIO))) { device_printf(sc->dev, "%s resource configuration\n", hw->name); } /* Allocate bridge resource state using the discovered hardware * configuration */ sc->bus_res = bhndb_alloc_resources(sc->dev, sc->parent_dev, hwcfg); if (sc->bus_res == NULL) { device_printf(sc->dev, "failed to allocate bridge resource " "state\n"); error = ENOMEM; goto failed; } /* Add our bridged bus device */ sc->bus_dev = BUS_ADD_CHILD(dev, BHND_PROBE_BUS, "bhnd", -1); if (sc->bus_dev == NULL) { error = ENXIO; goto failed; } dinfo = device_get_ivars(sc->bus_dev); dinfo->addrspace = BHNDB_ADDRSPACE_BRIDGED; /* We can now use bhndb to perform bridging of SYS_RES_MEMORY resources; * we use this to instantiate an erom parser instance */ eio = bhnd_erom_iores_new(sc->bus_dev, 0); if ((erom = bhnd_erom_alloc(erom_class, cid, eio)) == NULL) { bhnd_erom_io_fini(eio); error = ENXIO; goto failed; } /* Populate our resource priority configuration */ hwprio = BHNDB_BUS_GET_HARDWARE_PRIO(sc->parent_dev, sc->dev); error = bhndb_init_region_cfg(sc, erom, sc->bus_res, cores, ncores, hwprio); if (error) { device_printf(sc->dev, "failed to initialize resource " "priority configuration: %d\n", error); goto failed; } /* Free our erom instance */ bhnd_erom_free(erom); erom = NULL; return (0); failed: BHNDB_LOCK_DESTROY(sc); if (sc->bus_res != NULL) bhndb_free_resources(sc->bus_res); if (erom != NULL) bhnd_erom_free(erom); bhnd_service_registry_fini(&sc->services); return (error); } /** * Default 
bhndb(4) implementation of DEVICE_DETACH(). * * This function detaches any child devices, and if successful, releases all * resources held by the bridge device. */ int bhndb_generic_detach(device_t dev) { struct bhndb_softc *sc; int error; sc = device_get_softc(dev); /* Detach children */ if ((error = bus_generic_detach(dev))) return (error); /* Delete children */ if ((error = device_delete_children(dev))) return (error); /* Clean up our service registry */ if ((error = bhnd_service_registry_fini(&sc->services))) return (error); /* Clean up our driver state. */ bhndb_free_resources(sc->bus_res); BHNDB_LOCK_DESTROY(sc); return (0); } /** * Default bhndb(4) implementation of DEVICE_SUSPEND(). * * This function calls bus_generic_suspend() (or implements equivalent * behavior). */ int bhndb_generic_suspend(device_t dev) { return (bus_generic_suspend(dev)); } /** * Default bhndb(4) implementation of DEVICE_RESUME(). * * This function calls bus_generic_resume() (or implements equivalent * behavior). */ int bhndb_generic_resume(device_t dev) { struct bhndb_softc *sc; struct bhndb_resources *bus_res; struct bhndb_dw_alloc *dwa; int error; sc = device_get_softc(dev); bus_res = sc->bus_res; /* Guarantee that all in-use dynamic register windows are mapped to * their previously configured target address. */ BHNDB_LOCK(sc); error = 0; for (size_t i = 0; i < bus_res->dwa_count; i++) { dwa = &bus_res->dw_alloc[i]; /* Skip regions that were not previously used */ if (bhndb_dw_is_free(bus_res, dwa) && dwa->target == 0x0) continue; /* Otherwise, ensure the register window is correct before * any children attempt MMIO */ error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target); if (error) break; } BHNDB_UNLOCK(sc); /* Error restoring hardware state; children cannot be safely resumed */ if (error) { device_printf(dev, "Unable to restore hardware configuration; " "cannot resume: %d\n", error); return (error); } return (bus_generic_resume(dev)); } /** * Default implementation of BHNDB_SUSPEND_RESOURCE. */ static void bhndb_suspend_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; struct bhndb_dw_alloc *dwa; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return; BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa == NULL) { BHNDB_UNLOCK(sc); return; } if (BHNDB_DEBUG(PRIO)) device_printf(child, "suspend resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); /* Release the resource's window reference */ bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } /** * Default implementation of BHNDB_RESUME_RESOURCE. */ static int bhndb_resume_resource(device_t dev, device_t child, int type, struct resource *r) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Non-MMIO resources (e.g. IRQs) are handled solely by our parent */ if (type != SYS_RES_MEMORY) return (0); /* Inactive resources don't require reallocation of bridge resources */ if (!(rman_get_flags(r) & RF_ACTIVE)) return (0); if (BHNDB_DEBUG(PRIO)) device_printf(child, "resume resource type=%d 0x%jx+0x%jx\n", type, rman_get_start(r), rman_get_size(r)); return (bhndb_try_activate_resource(sc, rman_get_device(r), type, rman_get_rid(r), r, NULL)); } /** * Default bhndb(4) implementation of BUS_READ_IVAR(). */ static int bhndb_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { return (ENOENT); } /** * Default bhndb(4) implementation of BUS_WRITE_IVAR(). 
*/ static int bhndb_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { return (ENOENT); } /** * Return the address space for the given @p child device. */ bhndb_addrspace bhndb_get_addrspace(struct bhndb_softc *sc, device_t child) { struct bhndb_devinfo *dinfo; device_t imd_dev; /* Find the directly attached parent of the requesting device */ imd_dev = child; while (imd_dev != NULL && device_get_parent(imd_dev) != sc->dev) imd_dev = device_get_parent(imd_dev); if (imd_dev == NULL) panic("bhndb address space request for non-child device %s\n", device_get_nameunit(child)); dinfo = device_get_ivars(imd_dev); return (dinfo->addrspace); } /** * Return the rman instance for a given resource @p type, if any. * * @param sc The bhndb device state. * @param child The requesting child. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) */ static struct rman * bhndb_get_rman(struct bhndb_softc *sc, device_t child, int type) { switch (bhndb_get_addrspace(sc, child)) { case BHNDB_ADDRSPACE_NATIVE: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->ht_mem_rman); case SYS_RES_IRQ: return (NULL); default: return (NULL); } case BHNDB_ADDRSPACE_BRIDGED: switch (type) { case SYS_RES_MEMORY: return (&sc->bus_res->br_mem_rman); case SYS_RES_IRQ: return (&sc->bus_res->br_irq_rman); default: return (NULL); } } /* Quieten gcc */ return (NULL); } /** * Default implementation of BUS_ADD_CHILD() */ static device_t bhndb_add_child(device_t dev, u_int order, const char *name, int unit) { struct bhndb_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct bhndb_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE; resource_list_init(&dinfo->resources); device_set_ivars(child, dinfo); return (child); } /** * Default implementation of BUS_CHILD_DELETED(). */ static void bhndb_child_deleted(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { resource_list_free(&dinfo->resources); free(dinfo, M_BHND); } device_set_ivars(child, NULL); } /** * Default implementation of BHNDB_GET_CHIPID(). */ static const struct bhnd_chipid * bhndb_get_chipid(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->chipid); } /** * Default implementation of BHNDB_IS_CORE_DISABLED(). */ static bool bhndb_is_core_disabled(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc; sc = device_get_softc(dev); /* Try to defer to the bhndb bus parent */ if (BHNDB_BUS_IS_CORE_DISABLED(sc->parent_dev, dev, core)) return (true); /* Otherwise, we treat bridge-capable cores as unpopulated if they're * not the configured host bridge */ if (BHND_DEVCLASS_SUPPORTS_HOSTB(bhnd_core_class(core))) return (!bhnd_cores_equal(core, &sc->bridge_core)); /* Assume the core is populated */ return (false); } /** * Default bhndb(4) implementation of BHNDB_GET_HOSTB_CORE(). * * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged * bhnd(4) devices. */ static int bhndb_get_hostb_core(device_t dev, device_t child, struct bhnd_core_info *core) { struct bhndb_softc *sc = device_get_softc(dev); *core = sc->bridge_core; return (0); } /** * Default bhndb(4) implementation of BHND_BUS_GET_SERVICE_REGISTRY(). 
*/ static struct bhnd_service_registry * bhndb_get_service_registry(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); return (&sc->services); } /** * Default bhndb(4) implementation of BUS_ALLOC_RESOURCE(). */ static struct resource * bhndb_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct bhndb_softc *sc; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { /* Delegate to our parent device's bus; the requested * resource type isn't handled locally. */ return (BUS_ALLOC_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, start, end, count, flags)); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry. */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy\n", *rid, type, device_get_nameunit(child)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Validate resource addresses */ if (start > end || count > ((end - start) + 1)) return (NULL); /* Make our reservation */ rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); /* Activate */ if (flags & RF_ACTIVE) { error = bus_activate_resource(child, type, *rid, rv); if (error) { device_printf(dev, "failed to activate entry %#x type %d for " "child %s: %d\n", *rid, type, device_get_nameunit(child), error); rman_release_resource(rv); return (NULL); } } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } /** * Default bhndb(4) implementation of BUS_RELEASE_RESOURCE(). */ static int bhndb_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_softc *sc; struct resource_list_entry *rle; bool passthrough; int error; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. */ if (bhndb_get_rman(sc, child, type) == NULL) { return (BUS_RELEASE_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, r)); } /* Deactivate resources */ if (rman_get_flags(r) & RF_ACTIVE) { error = BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r); if (error) return (error); } if ((error = rman_release_resource(r))) return (error); if (!passthrough) { /* Clean resource list entry */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, rid); if (rle != NULL) rle->res = NULL; } return (0); } /** * Default bhndb(4) implementation of BUS_ADJUST_RESOURCE(). 
*/ static int bhndb_adjust_resource(device_t dev, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { struct bhndb_softc *sc; struct rman *rm; rman_res_t mstart, mend; int error; sc = device_get_softc(dev); error = 0; /* Delegate to our parent device's bus if the requested resource type * isn't handled locally. */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { return (BUS_ADJUST_RESOURCE(device_get_parent(sc->parent_dev), child, type, r, start, end)); } /* Verify basic constraints */ if (end <= start) return (EINVAL); if (!rman_is_region_manager(r, rm)) return (ENXIO); BHNDB_LOCK(sc); /* If not active, allow any range permitted by the resource manager */ if (!(rman_get_flags(r) & RF_ACTIVE)) goto done; /* Otherwise, the range is limited by the bridged resource mapping */ error = bhndb_find_resource_limits(sc->bus_res, type, r, &mstart, &mend); if (error) goto done; if (start < mstart || end > mend) { error = EINVAL; goto done; } /* Fall through */ done: if (!error) error = rman_adjust_resource(r, start, end); BHNDB_UNLOCK(sc); return (error); } /** * Initialize child resource @p r with a virtual address, tag, and handle * copied from @p parent, adjusted to contain only the range defined by * @p offset and @p size. * * @param r The resource to be initialized. * @param parent The parent bus resource that fully contains the subregion. * @param offset The subregion offset within @p parent. * @param size The subregion size. */ static int bhndb_init_child_resource(struct resource *r, struct resource *parent, bhnd_size_t offset, bhnd_size_t size) { bus_space_handle_t bh, child_bh; bus_space_tag_t bt; uintptr_t vaddr; int error; /* Fetch the parent resource's real bus values */ vaddr = (uintptr_t) rman_get_virtual(parent); bt = rman_get_bustag(parent); bh = rman_get_bushandle(parent); /* Configure child resource with window-adjusted real bus values */ vaddr += offset; error = bus_space_subregion(bt, bh, offset, size, &child_bh); if (error) return (error); rman_set_virtual(r, (void *) vaddr); rman_set_bustag(r, bt); rman_set_bushandle(r, child_bh); return (0); } /** * Attempt activation of a fixed register window mapping for @p child. * * @param sc BHNDB device state. * @param region The static region definition capable of mapping @p r. * @param child A child requesting resource activation. * @param type Resource type. * @param rid Resource identifier. * @param r Resource to be activated. * * @retval 0 if @p r was activated successfully * @retval ENOENT if no fixed register window was found. * @retval non-zero if @p r could not be activated. */ static int bhndb_activate_static_region(struct bhndb_softc *sc, struct bhndb_region *region, device_t child, int type, int rid, struct resource *r) { struct resource *bridge_res; const struct bhndb_regwin *win; bhnd_size_t parent_offset; rman_res_t r_start, r_size; int error; win = region->static_regwin; KASSERT(win != NULL && BHNDB_REGWIN_T_IS_STATIC(win->win_type), ("can't activate non-static region")); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Find the corresponding bridge resource */ bridge_res = bhndb_host_resource_for_regwin(sc->bus_res->res, win); if (bridge_res == NULL) return (ENXIO); /* Calculate subregion offset within the parent resource */ parent_offset = r_start - region->addr; parent_offset += win->win_offset; /* Configure resource with its real bus values.
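A worked example of the static-window offset computation above, with illustrative addresses: for a region at region->addr = 0x18001000 backed by a register window with win->win_offset = 0x1000, a child allocation at r_start = 0x18001400 yields parent_offset = (0x18001400 - 0x18001000) + 0x1000 = 0x1400 into the bridge resource, which bhndb_init_child_resource() then applies via bus_space_subregion().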
*/ error = bhndb_init_child_resource(r, bridge_res, parent_offset, r_size); if (error) return (error); /* Mark active */ if ((error = rman_activate_resource(r))) return (error); return (0); } /** * Attempt to allocate/retain a dynamic register window for @p r, returning * the retained window. * * @param sc The bhndb driver state. * @param r The resource for which a window will be retained. */ static struct bhndb_dw_alloc * bhndb_retain_dynamic_window(struct bhndb_softc *sc, struct resource *r) { struct bhndb_dw_alloc *dwa; rman_res_t r_start, r_size; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); r_start = rman_get_start(r); r_size = rman_get_size(r); /* Look for an existing dynamic window we can reference */ dwa = bhndb_dw_find_mapping(sc->bus_res, r_start, r_size); if (dwa != NULL) { if (bhndb_dw_retain(sc->bus_res, dwa, r) == 0) return (dwa); return (NULL); } /* Otherwise, try to reserve a free window */ dwa = bhndb_dw_next_free(sc->bus_res); if (dwa == NULL) { /* No free windows */ return (NULL); } /* Window must be large enough to map the entire resource */ if (dwa->win->win_size < rman_get_size(r)) return (NULL); /* Set the window target */ error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, rman_get_start(r), rman_get_size(r)); if (error) { device_printf(sc->dev, "dynamic window initialization " "for 0x%llx-0x%llx failed: %d\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, error); return (NULL); } /* Add our reservation */ if (bhndb_dw_retain(sc->bus_res, dwa, r)) return (NULL); return (dwa); } /** * Activate a resource using any viable static or dynamic register window. * * @param sc The bhndb driver state. * @param child The child holding ownership of @p r. * @param type The type of the resource to be activated. * @param rid The resource ID of @p r. * @param r The resource to be activated * @param[out] indirect On error and if not NULL, will be set to 'true' if * the caller should instead use an indirect resource mapping. * * @retval 0 success * @retval non-zero activation failed. 
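The dynamic-window case mirrors the static arithmetic: once a window has been retargeted so that dwa->target covers the resource, the child mapping begins at dwa->win->win_offset + (r_start - dwa->target) within the bridge resource, as computed in bhndb_try_activate_resource() below. For example (illustrative addresses), a window with win_offset = 0x3000 retargeted to dwa->target = 0x18104000 maps a child resource at r_start = 0x18104200 at parent offset 0x3200.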
 */
static int
bhndb_try_activate_resource(struct bhndb_softc *sc, device_t child, int type,
    int rid, struct resource *r, bool *indirect)
{
	struct bhndb_region	*region;
	struct bhndb_dw_alloc	*dwa;
	bhndb_priority_t	 dw_priority;
	rman_res_t		 r_start, r_size;
	rman_res_t		 parent_offset;
	int			 error;

	BHNDB_LOCK_ASSERT(sc, MA_NOTOWNED);

	if (indirect != NULL)
		*indirect = false;

	switch (type) {
	case SYS_RES_IRQ:
		/* IRQ resources are always directly mapped */
		return (rman_activate_resource(r));

	case SYS_RES_MEMORY:
		/* Handled below */
		break;

	default:
		device_printf(sc->dev, "unsupported resource type %d\n",
		    type);
		return (ENXIO);
	}

	/* Only MMIO resources can be mapped via register windows */
	KASSERT(type == SYS_RES_MEMORY, ("invalid type: %d", type));

	r_start = rman_get_start(r);
	r_size = rman_get_size(r);

	/* Activate native addrspace resources using the host address space */
	if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_NATIVE) {
		struct resource *parent;

		/* Find the bridge resource referenced by the child */
		parent = bhndb_host_resource_for_range(sc->bus_res->res,
		    type, r_start, r_size);
		if (parent == NULL) {
			device_printf(sc->dev, "host resource not found "
			    "for 0x%llx-0x%llx\n",
			    (unsigned long long) r_start,
			    (unsigned long long) r_start + r_size - 1);
			return (ENOENT);
		}

		/* Initialize child resource with the real bus values */
		error = bhndb_init_child_resource(r, parent,
		    r_start - rman_get_start(parent), r_size);
		if (error)
			return (error);

		/* Try to activate child resource */
		return (rman_activate_resource(r));
	}

	/* Default to low priority */
	dw_priority = BHNDB_PRIORITY_LOW;

	/* Look for a bus region matching the resource's address range */
	region = bhndb_find_resource_region(sc->bus_res, r_start, r_size);
	if (region != NULL)
		dw_priority = region->priority;

	/* Prefer static mappings over consuming a dynamic window. */
	if (region && region->static_regwin) {
		error = bhndb_activate_static_region(sc, region, child, type,
		    rid, r);
		if (error)
			device_printf(sc->dev, "static window activation "
			    "for 0x%llx-0x%llx failed\n",
			    (unsigned long long) r_start,
			    (unsigned long long) r_start + r_size - 1);
		return (error);
	}

	/* A dynamic window will be required; is this resource high enough
	 * priority to be reserved a dynamic window? */
	if (dw_priority < sc->bus_res->min_prio) {
		if (indirect)
			*indirect = true;

		return (ENOMEM);
	}

	/* Find and retain a usable window */
	BHNDB_LOCK(sc);
	{
		dwa = bhndb_retain_dynamic_window(sc, r);
	}
	BHNDB_UNLOCK(sc);

	if (dwa == NULL) {
		if (indirect)
			*indirect = true;
		return (ENOMEM);
	}

	/* Configure resource with its real bus values. */
	parent_offset = dwa->win->win_offset;
	parent_offset += r_start - dwa->target;

	error = bhndb_init_child_resource(r, dwa->parent_res, parent_offset,
	    dwa->win->win_size);
	if (error)
		goto failed;

	/* Mark active */
	if ((error = rman_activate_resource(r)))
		goto failed;

	return (0);

failed:
	/* Release our region allocation. */
	BHNDB_LOCK(sc);
	bhndb_dw_release(sc->bus_res, dwa, r);
	BHNDB_UNLOCK(sc);

	return (error);
}

/**
 * Default bhndb(4) implementation of BUS_ACTIVATE_RESOURCE().
 */
static int
bhndb_activate_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{
	struct bhndb_softc *sc = device_get_softc(dev);

	/* Delegate directly to our parent device's bus if the requested
	 * resource type isn't handled locally.
*/ if (bhndb_get_rman(sc, child, type) == NULL) { return (BUS_ACTIVATE_RESOURCE(device_get_parent(sc->parent_dev), child, type, rid, r)); } return (bhndb_try_activate_resource(sc, child, type, rid, r, NULL)); } /** * Default bhndb(4) implementation of BUS_DEACTIVATE_RESOURCE(). */ static int bhndb_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct bhndb_dw_alloc *dwa; struct bhndb_softc *sc; struct rman *rm; int error; sc = device_get_softc(dev); /* Delegate directly to our parent device's bus if the requested * resource type isn't handled locally. */ rm = bhndb_get_rman(sc, child, type); if (rm == NULL) { return (BUS_DEACTIVATE_RESOURCE( device_get_parent(sc->parent_dev), child, type, rid, r)); } /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); switch (type) { case SYS_RES_IRQ: /* No bridge-level state to be freed */ return (0); case SYS_RES_MEMORY: /* Free any dynamic window allocation. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { BHNDB_LOCK(sc); dwa = bhndb_dw_find_resource(sc->bus_res, r); if (dwa != NULL) bhndb_dw_release(sc->bus_res, dwa, r); BHNDB_UNLOCK(sc); } return (0); default: device_printf(dev, "unsupported resource type %d\n", type); return (ENXIO); } } /** * Default bhndb(4) implementation of BUS_GET_RESOURCE_LIST(). */ static struct resource_list * bhndb_get_resource_list(device_t dev, device_t child) { struct bhndb_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /** * Default bhndb(4) implementation of BHND_BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_NATIVE children, all resources are activated as direct * resources via BUS_ACTIVATE_RESOURCE(). * * For BHNDB_ADDRSPACE_BRIDGED children, the resource priority is determined, * and if possible, the resource is activated as a direct resource. For example, * depending on resource priority and bridge resource availability, this * function will attempt to activate SYS_RES_MEMORY resources using either a * static register window, a dynamic register window, or it will configure @p r * as an indirect resource -- in that order. */ static int bhndb_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct bhndb_softc *sc; struct bhndb_region *region; bhndb_priority_t r_prio; rman_res_t r_start, r_size; int error; bool indirect; KASSERT(!r->direct, ("direct flag set on inactive resource")); KASSERT(!(rman_get_flags(r->res) & RF_ACTIVE), ("RF_ACTIVE set on inactive resource")); sc = device_get_softc(dev); /* Delegate directly to BUS_ACTIVATE_RESOURCE() if the requested * resource type isn't handled locally. */ if (bhndb_get_rman(sc, child, type) == NULL) { error = BUS_ACTIVATE_RESOURCE(dev, child, type, rid, r->res); if (error == 0) r->direct = true; return (error); } r_start = rman_get_start(r->res); r_size = rman_get_size(r->res); /* Determine the resource priority of bridged resources, and skip direct * allocation if the priority is too low. */ if (bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { switch (type) { case SYS_RES_IRQ: /* IRQ resources are always direct */ break; case SYS_RES_MEMORY: region = bhndb_find_resource_region(sc->bus_res, r_start, r_size); if (region != NULL) r_prio = region->priority; else r_prio = BHNDB_PRIORITY_NONE; /* If less than the minimum dynamic window priority, * this resource should always be indirect. 
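		 * Returning 0 without setting r->direct leaves the
		 * resource in indirect mode; all access to it is then
		 * routed through the bhnd_bus_read/write methods.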
*/ if (r_prio < sc->bus_res->min_prio) return (0); break; default: device_printf(dev, "unsupported resource type %d\n", type); return (ENXIO); } } /* Attempt direct activation */ error = bhndb_try_activate_resource(sc, child, type, rid, r->res, &indirect); if (!error) { r->direct = true; } else if (indirect) { /* The request was valid, but no viable register window is * available; indirection must be employed. */ error = 0; r->direct = false; } if (BHNDB_DEBUG(PRIO) && bhndb_get_addrspace(sc, child) == BHNDB_ADDRSPACE_BRIDGED) { device_printf(child, "activated 0x%llx-0x%llx as %s " "resource\n", (unsigned long long) r_start, (unsigned long long) r_start + r_size - 1, r->direct ? "direct" : "indirect"); } return (error); } /** * Default bhndb(4) implementation of BHND_BUS_DEACTIVATE_RESOURCE(). */ static int bhndb_deactivate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { int error; /* Indirect resources don't require activation */ if (!r->direct) return (0); KASSERT(rman_get_flags(r->res) & RF_ACTIVE, ("RF_ACTIVE not set on direct resource")); /* Perform deactivation */ error = BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r->res); if (!error) r->direct = false; return (error); } /** * Find the best available bridge resource allocation record capable of handling * bus I/O requests of @p size at @p addr. * * In order of preference, this function will either: * * - Configure and return a free allocation record * - Return an existing allocation record mapping the requested space, or * - Steal, configure, and return an in-use allocation record. * * Will panic if a usable record cannot be found. * * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. * @param[out] borrowed Set to true if the allocation record was borrowed to * fulfill this request; the borrowed record maps the target address range, * and must not be modified. * @param[out] stolen Set to true if the allocation record was stolen to fulfill * this request. If a stolen allocation record is returned, * bhndb_io_resource_restore() must be called upon completion of the bus I/O * request. * @param[out] restore If the allocation record was stolen, this will be set * to the target that must be restored. */ static struct bhndb_dw_alloc * bhndb_io_resource_get_window(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bool *borrowed, bool *stolen, bus_addr_t *restore) { struct bhndb_resources *br; struct bhndb_dw_alloc *dwa; struct bhndb_region *region; BHNDB_LOCK_ASSERT(sc, MA_OWNED); br = sc->bus_res; *borrowed = false; *stolen = false; /* Try to fetch a free window */ if ((dwa = bhndb_dw_next_free(br)) != NULL) return (dwa); /* Search for an existing dynamic mapping of this address range. * Static regions are not searched, as a statically mapped * region would never be allocated as an indirect resource. 
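	 * (Indirect resources are those that could not be granted a
	 * static or dynamic window at activation time; see
	 * bhndb_try_activate_resource() above.)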
*/ for (size_t i = 0; i < br->dwa_count; i++) { const struct bhndb_regwin *win; dwa = &br->dw_alloc[i]; win = dwa->win; KASSERT(win->win_type == BHNDB_REGWIN_T_DYN, ("invalid register window type")); /* Verify the range */ if (addr < dwa->target) continue; if (addr + size > dwa->target + win->win_size) continue; /* Found */ *borrowed = true; return (dwa); } /* Try to steal a window; this should only be required on very early * PCI_V0 (BCM4318, etc) Wi-Fi chipsets */ region = bhndb_find_resource_region(br, addr, size); if (region == NULL) return (NULL); if ((region->alloc_flags & BHNDB_ALLOC_FULFILL_ON_OVERCOMMIT) == 0) return (NULL); /* Steal a window. This acquires our backing spinlock, disabling * interrupts; the spinlock will be released by * bhndb_dw_return_stolen() */ if ((dwa = bhndb_dw_steal(br, restore)) != NULL) { *stolen = true; return (dwa); } panic("register windows exhausted attempting to map 0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } /** * Return a borrowed reference to a bridge resource allocation record capable * of handling bus I/O requests of @p size at @p addr. * * This will either return a reference to an existing allocation record mapping * the requested space, or will configure and return a free allocation record. * * Will panic if a usable record cannot be found. * * @param sc Bridge driver state. * @param addr The I/O target address. * @param size The size of the I/O operation to be performed at @p addr. * @param[out] offset The offset within the returned resource at which * to perform the I/O request. * @param[out] stolen Set to true if the allocation record was stolen to fulfill * this request. If a stolen allocation record is returned, * bhndb_io_resource_restore() must be called upon completion of the bus I/O * request. * @param[out] restore If the allocation record was stolen, this will be set * to the target that must be restored. */ static inline struct bhndb_dw_alloc * bhndb_io_resource(struct bhndb_softc *sc, bus_addr_t addr, bus_size_t size, bus_size_t *offset, bool *stolen, bus_addr_t *restore) { struct bhndb_dw_alloc *dwa; bool borrowed; int error; BHNDB_LOCK_ASSERT(sc, MA_OWNED); dwa = bhndb_io_resource_get_window(sc, addr, size, &borrowed, stolen, restore); /* Adjust the window if the I/O request won't fit in the current * target range. 
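	 *
	 * For example (illustrative values): a window targeting 0x18000000
	 * with a 0x1000-byte size cannot satisfy a 4-byte request at
	 * 0x18000ffe, and must be retargeted first.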
*/ if (addr < dwa->target || addr > dwa->target + dwa->win->win_size || (dwa->target + dwa->win->win_size) - addr < size) { /* Cannot modify target of borrowed windows */ if (borrowed) { panic("borrowed register window does not map expected " "range 0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } error = bhndb_dw_set_addr(sc->dev, sc->bus_res, dwa, addr, size); if (error) { panic("failed to set register window target mapping " "0x%llx-0x%llx\n", (unsigned long long) addr, (unsigned long long) addr+size-1); } } /* Calculate the offset and return */ *offset = (addr - dwa->target) + dwa->win->win_offset; return (dwa); } /* * BHND_BUS_(READ|WRITE_* implementations */ /* bhndb_bus_(read|write) common implementation */ #define BHNDB_IO_COMMON_SETUP(_io_size) \ struct bhndb_softc *sc; \ struct bhndb_dw_alloc *dwa; \ struct resource *io_res; \ bus_size_t io_offset; \ bus_addr_t restore; \ bool stolen; \ \ sc = device_get_softc(dev); \ \ BHNDB_LOCK(sc); \ dwa = bhndb_io_resource(sc, rman_get_start(r->res) + \ offset, _io_size, &io_offset, &stolen, &restore); \ io_res = dwa->parent_res; \ \ KASSERT(!r->direct, \ ("bhnd_bus slow path used for direct resource")); \ \ KASSERT(rman_get_flags(io_res) & RF_ACTIVE, \ ("i/o resource is not active")); #define BHNDB_IO_COMMON_TEARDOWN() \ if (stolen) { \ bhndb_dw_return_stolen(sc->dev, sc->bus_res, \ dwa, restore); \ } \ BHNDB_UNLOCK(sc); /* Defines a bhndb_bus_read_* method implementation */ #define BHNDB_IO_READ(_type, _name) \ static _type \ bhndb_bus_read_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset) \ { \ _type v; \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ v = bus_read_ ## _name (io_res, io_offset); \ BHNDB_IO_COMMON_TEARDOWN(); \ \ return (v); \ } /* Defines a bhndb_bus_write_* method implementation */ #define BHNDB_IO_WRITE(_type, _name) \ static void \ bhndb_bus_write_ ## _name (device_t dev, device_t child, \ struct bhnd_resource *r, bus_size_t offset, _type value) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type)); \ bus_write_ ## _name (io_res, io_offset, value); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a bhndb_bus_(read|write|set)_(multi|region)_* method */ #define BHNDB_IO_MISC(_type, _ptr, _op, _size) \ static void \ bhndb_bus_ ## _op ## _ ## _size (device_t dev, \ device_t child, struct bhnd_resource *r, bus_size_t offset, \ _type _ptr datap, bus_size_t count) \ { \ BHNDB_IO_COMMON_SETUP(sizeof(_type) * count); \ bus_ ## _op ## _ ## _size (io_res, io_offset, \ datap, count); \ BHNDB_IO_COMMON_TEARDOWN(); \ } /* Defines a complete set of read/write methods */ #define BHNDB_IO_METHODS(_type, _size) \ BHNDB_IO_READ(_type, _size) \ BHNDB_IO_WRITE(_type, _size) \ \ BHNDB_IO_READ(_type, stream_ ## _size) \ BHNDB_IO_WRITE(_type, stream_ ## _size) \ \ BHNDB_IO_MISC(_type, *, read_multi, _size) \ BHNDB_IO_MISC(_type, *, write_multi, _size) \ \ BHNDB_IO_MISC(_type, *, read_multi_stream, _size) \ BHNDB_IO_MISC(_type, *, write_multi_stream, _size) \ \ BHNDB_IO_MISC(_type, , set_multi, _size) \ BHNDB_IO_MISC(_type, , set_region, _size) \ BHNDB_IO_MISC(_type, *, read_region, _size) \ BHNDB_IO_MISC(_type, *, write_region, _size) \ \ BHNDB_IO_MISC(_type, *, read_region_stream, _size) \ BHNDB_IO_MISC(_type, *, write_region_stream, _size) BHNDB_IO_METHODS(uint8_t, 1); BHNDB_IO_METHODS(uint16_t, 2); BHNDB_IO_METHODS(uint32_t, 4); /** * Default bhndb(4) implementation of BHND_BUS_BARRIER(). 
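 *
 * Like the macro-generated bhnd_bus_read/write methods above, this borrows
 * (or, on early chipsets, steals) a register window via
 * BHNDB_IO_COMMON_SETUP(). For reference, a BHNDB_IO_READ(uint8_t, 1)
 * instantiation expands to roughly:
 *
 * @code
 *	static uint8_t
 *	bhndb_bus_read_1(device_t dev, device_t child,
 *	    struct bhnd_resource *r, bus_size_t offset)
 *	{
 *		uint8_t v;
 *		BHNDB_IO_COMMON_SETUP(sizeof(uint8_t));
 *		v = bus_read_1(io_res, io_offset);
 *		BHNDB_IO_COMMON_TEARDOWN();
 *		return (v);
 *	}
 * @endcode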
*/ static void bhndb_bus_barrier(device_t dev, device_t child, struct bhnd_resource *r, bus_size_t offset, bus_size_t length, int flags) { BHNDB_IO_COMMON_SETUP(length); bus_barrier(io_res, io_offset + offset, length, flags); BHNDB_IO_COMMON_TEARDOWN(); } /** * Default bhndb(4) implementation of BHND_MAP_INTR(). */ static int bhndb_bhnd_map_intr(device_t dev, device_t child, u_int intr, rman_res_t *irq) { struct bhndb_softc *sc; u_int ivec; int error; sc = device_get_softc(dev); /* Is the intr valid? */ if (intr >= bhnd_get_intr_count(child)) return (EINVAL); /* Fetch the interrupt vector */ if ((error = bhnd_get_intr_ivec(child, intr, &ivec))) return (error); /* Map directly to the actual backplane interrupt vector */ *irq = ivec; return (0); } /** * Default bhndb(4) implementation of BHND_UNMAP_INTR(). */ static void bhndb_bhnd_unmap_intr(device_t dev, device_t child, rman_res_t irq) { /* No state to clean up */ } /** * Default bhndb(4) implementation of BUS_SETUP_INTR(). */ static int bhndb_setup_intr(device_t dev, device_t child, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { struct bhndb_softc *sc; struct bhndb_intr_isrc *isrc; struct bhndb_intr_handler *ih; int error; sc = device_get_softc(dev); /* Fetch the isrc */ if ((error = BHNDB_MAP_INTR_ISRC(dev, r, &isrc))) { device_printf(dev, "failed to fetch isrc: %d\n", error); return (error); } /* Allocate new ihandler entry */ ih = bhndb_alloc_intr_handler(child, r, isrc); if (ih == NULL) return (ENOMEM); /* Perform actual interrupt setup via the host isrc */ error = bus_setup_intr(isrc->is_owner, isrc->is_res, flags, filter, handler, arg, &ih->ih_cookiep); if (error) { bhndb_free_intr_handler(ih); return (error); } /* Add to our interrupt handler list */ BHNDB_LOCK(sc); bhndb_register_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); /* Provide the interrupt handler entry as our cookiep value */ *cookiep = ih; return (0); } /** * Default bhndb(4) implementation of BUS_TEARDOWN_INTR(). */ static int bhndb_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookiep) { struct bhndb_softc *sc; struct bhndb_intr_handler *ih; struct bhndb_intr_isrc *isrc; int error; sc = device_get_softc(dev); /* Locate and claim ownership of the interrupt handler entry */ BHNDB_LOCK(sc); ih = bhndb_find_intr_handler(sc->bus_res, cookiep); if (ih == NULL) { panic("%s requested teardown of invalid cookiep %p", device_get_nameunit(child), cookiep); } bhndb_deregister_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); /* Perform actual interrupt teardown via the host isrc */ isrc = ih->ih_isrc; error = bus_teardown_intr(isrc->is_owner, isrc->is_res, ih->ih_cookiep); if (error) { /* If teardown fails, we need to reinsert the handler entry * to allow later teardown */ BHNDB_LOCK(sc); bhndb_register_intr_handler(sc->bus_res, ih); BHNDB_UNLOCK(sc); return (error); } /* Free the entry */ bhndb_free_intr_handler(ih); return (0); } /** * Default bhndb(4) implementation of BUS_BIND_INTR(). 
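 *
 * The bind request is delegated to the host interrupt resource (isrc)
 * backing the child's IRQ resource.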
 */
static int
bhndb_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu)
{
	struct bhndb_softc		*sc;
	struct bhndb_intr_handler	*ih;
	struct bhndb_intr_isrc		*isrc;

	sc = device_get_softc(dev);
	isrc = NULL;

	/* Fetch the isrc corresponding to the child IRQ resource */
	BHNDB_LOCK(sc);
	STAILQ_FOREACH(ih, &sc->bus_res->bus_intrs, ih_link) {
		if (ih->ih_res == irq) {
			isrc = ih->ih_isrc;
			break;
		}
	}
	BHNDB_UNLOCK(sc);

	if (isrc == NULL) {
		panic("%s requested bind of invalid irq %#jx-%#jx",
		    device_get_nameunit(child), rman_get_start(irq),
		    rman_get_end(irq));
	}

	/* Perform actual bind via the host isrc */
	return (bus_bind_intr(isrc->is_owner, isrc->is_res, cpu));
}

/**
 * Default bhndb(4) implementation of BUS_DESCRIBE_INTR().
 */
static int
bhndb_describe_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie, const char *descr)
{
	struct bhndb_softc		*sc;
	struct bhndb_intr_handler	*ih;
	struct bhndb_intr_isrc		*isrc;

	sc = device_get_softc(dev);

	/* Locate the interrupt handler entry; the caller owns the handler
	 * reference, and thus our entry is guaranteed to remain valid after
	 * we drop our lock below. */
	BHNDB_LOCK(sc);

	ih = bhndb_find_intr_handler(sc->bus_res, cookie);
	if (ih == NULL) {
		panic("%s requested invalid cookiep %p",
		    device_get_nameunit(child), cookie);
	}

	isrc = ih->ih_isrc;

	BHNDB_UNLOCK(sc);

	/* Perform the actual request via the host isrc */
	return (BUS_DESCRIBE_INTR(device_get_parent(isrc->is_owner),
	    isrc->is_owner, isrc->is_res, ih->ih_cookiep, descr));
}

/**
 * Default bhndb(4) implementation of BUS_CONFIG_INTR().
 */
static int
bhndb_config_intr(device_t dev, int irq, enum intr_trigger trig,
    enum intr_polarity pol)
{
	/* Unsupported */
	return (ENXIO);
}

/**
 * Default bhndb(4) implementation of BUS_REMAP_INTR().
 */
static int
bhndb_remap_intr(device_t dev, device_t child, u_int irq)
{
	/* Unsupported */
	return (ENXIO);
}

/**
 * Default bhndb(4) implementation of BHND_BUS_GET_DMA_TRANSLATION().
 */
static inline int
bhndb_get_dma_translation(device_t dev, device_t child, u_int width,
    uint32_t flags, bus_dma_tag_t *dmat,
    struct bhnd_dma_translation *translation)
{
	struct bhndb_softc			*sc;
	const struct bhndb_hwcfg		*hwcfg;
	const struct bhnd_dma_translation	*match;
	bus_dma_tag_t				 match_dmat;
	bhnd_addr_t				 addr_mask, match_addr_mask;

	sc = device_get_softc(dev);
	hwcfg = sc->bus_res->cfg;

	/* Is DMA supported? */
	if (sc->bus_res->res->dma_tags == NULL)
		return (ENODEV);

	/* Is the requested width supported? */
	if (width > BHND_DMA_ADDR_32BIT) {
		/* Backplane must support 64-bit addressing */
		if (!(sc->chipid.chip_caps & BHND_CAP_BP64))
			width = BHND_DMA_ADDR_32BIT;
	}

	/* Find the best matching descriptor for the requested width */
	addr_mask = BHND_DMA_ADDR_BITMASK(width);

	match = NULL;
	match_addr_mask = 0x0;
	match_dmat = NULL;

	for (size_t i = 0; i < sc->bus_res->res->num_dma_tags; i++) {
		const struct bhnd_dma_translation	*dwin;
		bhnd_addr_t				 masked;

		dwin = &hwcfg->dma_translations[i];

		/* The base address must be device addressable */
		if ((dwin->base_addr & addr_mask) != dwin->base_addr)
			continue;

		/* The flags must match */
		if ((dwin->flags & flags) != flags)
			continue;

		/* The window must cover at least part of our addressable
		 * range */
		masked = (dwin->addr_mask | dwin->addrext_mask) & addr_mask;
		if (masked == 0)
			continue;

		/* Is this a better match?
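		 * A candidate wins if it covers more of the device's
		 * addressable range (i.e. its masked value is larger)
		 * than the current best match.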
*/ if (match == NULL || masked > match_addr_mask) { match = dwin; match_addr_mask = masked; match_dmat = sc->bus_res->res->dma_tags[i]; } } if (match == NULL || match_addr_mask == 0) return (ENOENT); if (dmat != NULL) *dmat = match_dmat; if (translation != NULL) *translation = *match; return (0); } /** * Default bhndb(4) implementation of BUS_GET_DMA_TAG(). */ static bus_dma_tag_t bhndb_get_dma_tag(device_t dev, device_t child) { struct bhndb_softc *sc = device_get_softc(dev); /* * A bridge may have multiple DMA translation descriptors, each with * their own incompatible restrictions; drivers should in general call * BHND_BUS_GET_DMA_TRANSLATION() to fetch both the best available DMA * translation, and its corresponding DMA tag. * * Child drivers that do not use BHND_BUS_GET_DMA_TRANSLATION() are * responsible for creating their own restricted DMA tag; since we * cannot do this for them in BUS_GET_DMA_TAG(), we simply return the * bridge parent's DMA tag directly; */ return (bus_get_dma_tag(sc->parent_dev)); } static device_method_t bhndb_methods[] = { /* Device interface */ \ DEVMETHOD(device_probe, bhndb_generic_probe), DEVMETHOD(device_detach, bhndb_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bhndb_generic_suspend), DEVMETHOD(device_resume, bhndb_generic_resume), /* Bus interface */ DEVMETHOD(bus_probe_nomatch, bhndb_probe_nomatch), DEVMETHOD(bus_print_child, bhndb_print_child), - DEVMETHOD(bus_child_pnpinfo_str, bhndb_child_pnpinfo_str), - DEVMETHOD(bus_child_location_str, bhndb_child_location_str), + DEVMETHOD(bus_child_location, bhndb_child_location), DEVMETHOD(bus_add_child, bhndb_add_child), DEVMETHOD(bus_child_deleted, bhndb_child_deleted), DEVMETHOD(bus_alloc_resource, bhndb_alloc_resource), DEVMETHOD(bus_release_resource, bhndb_release_resource), DEVMETHOD(bus_activate_resource, bhndb_activate_resource), DEVMETHOD(bus_deactivate_resource, bhndb_deactivate_resource), DEVMETHOD(bus_setup_intr, bhndb_setup_intr), DEVMETHOD(bus_teardown_intr, bhndb_teardown_intr), DEVMETHOD(bus_config_intr, bhndb_config_intr), DEVMETHOD(bus_bind_intr, bhndb_bind_intr), DEVMETHOD(bus_describe_intr, bhndb_describe_intr), DEVMETHOD(bus_remap_intr, bhndb_remap_intr), DEVMETHOD(bus_get_dma_tag, bhndb_get_dma_tag), DEVMETHOD(bus_adjust_resource, bhndb_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_get_resource_list, bhndb_get_resource_list), DEVMETHOD(bus_read_ivar, bhndb_read_ivar), DEVMETHOD(bus_write_ivar, bhndb_write_ivar), /* BHNDB interface */ DEVMETHOD(bhndb_get_chipid, bhndb_get_chipid), DEVMETHOD(bhndb_is_core_disabled, bhndb_is_core_disabled), DEVMETHOD(bhndb_get_hostb_core, bhndb_get_hostb_core), DEVMETHOD(bhndb_suspend_resource, bhndb_suspend_resource), DEVMETHOD(bhndb_resume_resource, bhndb_resume_resource), /* BHND interface */ DEVMETHOD(bhnd_bus_get_chipid, bhndb_get_chipid), DEVMETHOD(bhnd_bus_activate_resource, bhndb_activate_bhnd_resource), DEVMETHOD(bhnd_bus_deactivate_resource, bhndb_deactivate_bhnd_resource), DEVMETHOD(bhnd_bus_get_nvram_var, bhnd_bus_generic_get_nvram_var), DEVMETHOD(bhnd_bus_map_intr, bhndb_bhnd_map_intr), DEVMETHOD(bhnd_bus_unmap_intr, bhndb_bhnd_unmap_intr), DEVMETHOD(bhnd_bus_get_dma_translation, bhndb_get_dma_translation), DEVMETHOD(bhnd_bus_get_service_registry,bhndb_get_service_registry), DEVMETHOD(bhnd_bus_register_provider, 
bhnd_bus_generic_sr_register_provider), DEVMETHOD(bhnd_bus_deregister_provider, bhnd_bus_generic_sr_deregister_provider), DEVMETHOD(bhnd_bus_retain_provider, bhnd_bus_generic_sr_retain_provider), DEVMETHOD(bhnd_bus_release_provider, bhnd_bus_generic_sr_release_provider), DEVMETHOD(bhnd_bus_read_1, bhndb_bus_read_1), DEVMETHOD(bhnd_bus_read_2, bhndb_bus_read_2), DEVMETHOD(bhnd_bus_read_4, bhndb_bus_read_4), DEVMETHOD(bhnd_bus_write_1, bhndb_bus_write_1), DEVMETHOD(bhnd_bus_write_2, bhndb_bus_write_2), DEVMETHOD(bhnd_bus_write_4, bhndb_bus_write_4), DEVMETHOD(bhnd_bus_read_stream_1, bhndb_bus_read_stream_1), DEVMETHOD(bhnd_bus_read_stream_2, bhndb_bus_read_stream_2), DEVMETHOD(bhnd_bus_read_stream_4, bhndb_bus_read_stream_4), DEVMETHOD(bhnd_bus_write_stream_1, bhndb_bus_write_stream_1), DEVMETHOD(bhnd_bus_write_stream_2, bhndb_bus_write_stream_2), DEVMETHOD(bhnd_bus_write_stream_4, bhndb_bus_write_stream_4), DEVMETHOD(bhnd_bus_read_multi_1, bhndb_bus_read_multi_1), DEVMETHOD(bhnd_bus_read_multi_2, bhndb_bus_read_multi_2), DEVMETHOD(bhnd_bus_read_multi_4, bhndb_bus_read_multi_4), DEVMETHOD(bhnd_bus_write_multi_1, bhndb_bus_write_multi_1), DEVMETHOD(bhnd_bus_write_multi_2, bhndb_bus_write_multi_2), DEVMETHOD(bhnd_bus_write_multi_4, bhndb_bus_write_multi_4), DEVMETHOD(bhnd_bus_read_multi_stream_1, bhndb_bus_read_multi_stream_1), DEVMETHOD(bhnd_bus_read_multi_stream_2, bhndb_bus_read_multi_stream_2), DEVMETHOD(bhnd_bus_read_multi_stream_4, bhndb_bus_read_multi_stream_4), DEVMETHOD(bhnd_bus_write_multi_stream_1,bhndb_bus_write_multi_stream_1), DEVMETHOD(bhnd_bus_write_multi_stream_2,bhndb_bus_write_multi_stream_2), DEVMETHOD(bhnd_bus_write_multi_stream_4,bhndb_bus_write_multi_stream_4), DEVMETHOD(bhnd_bus_set_multi_1, bhndb_bus_set_multi_1), DEVMETHOD(bhnd_bus_set_multi_2, bhndb_bus_set_multi_2), DEVMETHOD(bhnd_bus_set_multi_4, bhndb_bus_set_multi_4), DEVMETHOD(bhnd_bus_set_region_1, bhndb_bus_set_region_1), DEVMETHOD(bhnd_bus_set_region_2, bhndb_bus_set_region_2), DEVMETHOD(bhnd_bus_set_region_4, bhndb_bus_set_region_4), DEVMETHOD(bhnd_bus_read_region_1, bhndb_bus_read_region_1), DEVMETHOD(bhnd_bus_read_region_2, bhndb_bus_read_region_2), DEVMETHOD(bhnd_bus_read_region_4, bhndb_bus_read_region_4), DEVMETHOD(bhnd_bus_write_region_1, bhndb_bus_write_region_1), DEVMETHOD(bhnd_bus_write_region_2, bhndb_bus_write_region_2), DEVMETHOD(bhnd_bus_write_region_4, bhndb_bus_write_region_4), DEVMETHOD(bhnd_bus_read_region_stream_1,bhndb_bus_read_region_stream_1), DEVMETHOD(bhnd_bus_read_region_stream_2,bhndb_bus_read_region_stream_2), DEVMETHOD(bhnd_bus_read_region_stream_4,bhndb_bus_read_region_stream_4), DEVMETHOD(bhnd_bus_write_region_stream_1,bhndb_bus_write_region_stream_1), DEVMETHOD(bhnd_bus_write_region_stream_2,bhndb_bus_write_region_stream_2), DEVMETHOD(bhnd_bus_write_region_stream_4,bhndb_bus_write_region_stream_4), DEVMETHOD(bhnd_bus_barrier, bhndb_bus_barrier), DEVMETHOD_END }; devclass_t bhndb_devclass; DEFINE_CLASS_0(bhndb, bhndb_driver, bhndb_methods, sizeof(struct bhndb_softc)); MODULE_VERSION(bhndb, 1); MODULE_DEPEND(bhndb, bhnd, 1, 1, 1); diff --git a/sys/dev/bhnd/cores/chipc/chipc.c b/sys/dev/bhnd/cores/chipc/chipc.c index 1449c3b565dc..314f214d08b1 100644 --- a/sys/dev/bhnd/cores/chipc/chipc.c +++ b/sys/dev/bhnd/cores/chipc/chipc.c @@ -1,1454 +1,1430 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2015-2016 Landon Fuller * Copyright (c) 2016 Michael Zhilin * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. 
* * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Broadcom ChipCommon driver. * * With the exception of some very early chipsets, the ChipCommon core * has been included in all HND SoCs and chipsets based on the siba(4) * and bcma(4) interconnects, providing a common interface to chipset * identification, bus enumeration, UARTs, clocks, watchdog interrupts, * GPIO, flash, etc. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "chipcreg.h" #include "chipcvar.h" #include "chipc_private.h" devclass_t bhnd_chipc_devclass; /**< bhnd(4) chipcommon device class */ static struct bhnd_device_quirk chipc_quirks[]; /* Supported device identifiers */ static const struct bhnd_device chipc_devices[] = { BHND_DEVICE(BCM, CC, NULL, chipc_quirks), BHND_DEVICE(BCM, 4706_CC, NULL, chipc_quirks), BHND_DEVICE_END }; /* Device quirks table */ static struct bhnd_device_quirk chipc_quirks[] = { /* HND OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (12), CHIPC_QUIRK_OTP_HND), /* (?) */ BHND_CORE_QUIRK (HWREV_EQ (17), CHIPC_QUIRK_OTP_HND), /* BCM4311 */ BHND_CORE_QUIRK (HWREV_EQ (22), CHIPC_QUIRK_OTP_HND), /* BCM4312 */ /* IPX OTP controller revisions */ BHND_CORE_QUIRK (HWREV_EQ (21), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(23), CHIPC_QUIRK_OTP_IPX), BHND_CORE_QUIRK (HWREV_GTE(32), CHIPC_QUIRK_SUPPORTS_SPROM), BHND_CORE_QUIRK (HWREV_GTE(35), CHIPC_QUIRK_SUPPORTS_CAP_EXT), BHND_CORE_QUIRK (HWREV_GTE(49), CHIPC_QUIRK_IPX_OTPL_SIZE), /* 4706 variant quirks */ BHND_CORE_QUIRK (HWREV_EQ (38), CHIPC_QUIRK_4706_NFLASH), /* BCM5357? 
 */
	BHND_CHIP_QUIRK	(4706,	HWREV_ANY,	CHIPC_QUIRK_4706_NFLASH),

	/* 4331 quirks */
	BHND_CHIP_QUIRK	(4331,	HWREV_ANY,	CHIPC_QUIRK_4331_EXTPA_MUX_SPROM),
	BHND_PKG_QUIRK	(4331,	TN,		CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM),
	BHND_PKG_QUIRK	(4331,	TNA0,		CHIPC_QUIRK_4331_GPIO2_5_MUX_SPROM),
	BHND_PKG_QUIRK	(4331,	TT,		CHIPC_QUIRK_4331_EXTPA2_MUX_SPROM),

	/* 4360 quirks */
	BHND_CHIP_QUIRK	(4352,	HWREV_LTE(2),	CHIPC_QUIRK_4360_FEM_MUX_SPROM),
	BHND_CHIP_QUIRK	(43460,	HWREV_LTE(2),	CHIPC_QUIRK_4360_FEM_MUX_SPROM),
	BHND_CHIP_QUIRK	(43462,	HWREV_LTE(2),	CHIPC_QUIRK_4360_FEM_MUX_SPROM),
	BHND_CHIP_QUIRK	(43602,	HWREV_LTE(2),	CHIPC_QUIRK_4360_FEM_MUX_SPROM),

	BHND_DEVICE_QUIRK_END
};

static int		 chipc_add_children(struct chipc_softc *sc);

static bhnd_nvram_src	 chipc_find_nvram_src(struct chipc_softc *sc,
			     struct chipc_caps *caps);
static int		 chipc_read_caps(struct chipc_softc *sc,
			     struct chipc_caps *caps);

static bool		 chipc_should_enable_muxed_sprom(
			     struct chipc_softc *sc);
static int		 chipc_enable_otp_power(struct chipc_softc *sc);
static void		 chipc_disable_otp_power(struct chipc_softc *sc);
static int		 chipc_enable_sprom_pins(struct chipc_softc *sc);
static void		 chipc_disable_sprom_pins(struct chipc_softc *sc);

static int		 chipc_try_activate_resource(struct chipc_softc *sc,
			     device_t child, int type, int rid,
			     struct resource *r, bool req_direct);

static int		 chipc_init_rman(struct chipc_softc *sc);
static void		 chipc_free_rman(struct chipc_softc *sc);
static struct rman	*chipc_get_rman(struct chipc_softc *sc, int type);

/* quirk and capability flag convenience macros */
#define	CHIPC_QUIRK(_sc, _name)	\
    ((_sc)->quirks & CHIPC_QUIRK_ ## _name)

#define	CHIPC_CAP(_sc, _name)	\
    ((_sc)->caps._name)

#define	CHIPC_ASSERT_QUIRK(_sc, name)	\
    KASSERT(CHIPC_QUIRK((_sc), name), ("quirk " __STRING(name) " not set"))

#define	CHIPC_ASSERT_CAP(_sc, name)	\
    KASSERT(CHIPC_CAP((_sc), name), ("capability " __STRING(name) " not set"))

static int
chipc_probe(device_t dev)
{
	const struct bhnd_device *id;

	id = bhnd_device_lookup(dev, chipc_devices, sizeof(chipc_devices[0]));
	if (id == NULL)
		return (ENXIO);

	bhnd_set_default_core_desc(dev);
	return (BUS_PROBE_DEFAULT);
}

static int
chipc_attach(device_t dev)
{
	struct chipc_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->quirks = bhnd_device_quirks(dev, chipc_devices,
	    sizeof(chipc_devices[0]));
	sc->sprom_refcnt = 0;

	CHIPC_LOCK_INIT(sc);
	STAILQ_INIT(&sc->mem_regions);

	/* Set up resource management */
	if ((error = chipc_init_rman(sc))) {
		device_printf(sc->dev,
		    "failed to initialize chipc resource state: %d\n", error);
		goto failed;
	}

	/* Allocate the region containing the chipc register block */
	if ((sc->core_region = chipc_find_region_by_rid(sc, 0)) == NULL) {
		error = ENXIO;
		goto failed;
	}

	error = chipc_retain_region(sc, sc->core_region,
	    RF_ALLOCATED|RF_ACTIVE);
	if (error) {
		sc->core_region = NULL;
		goto failed;
	}

	/* Save a direct reference to our chipc registers */
	sc->core = sc->core_region->cr_res;

	/* Fetch and parse capability register(s) */
	if ((error = chipc_read_caps(sc, &sc->caps)))
		goto failed;

	if (bootverbose)
		chipc_print_caps(sc->dev, &sc->caps);

	/* Attach all supported child devices */
	if ((error = chipc_add_children(sc)))
		goto failed;

	/*
	 * Register ourselves with the bus; we're fully initialized and can
	 * respond to ChipCommon API requests.
	 *
	 * Since our children may need access to ChipCommon, this must be done
	 * before attaching our children below (via bus_generic_attach).
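	 *
	 * A child could then, for example, look up this provider via
	 * bhnd_retain_provider(child, BHND_SERVICE_CHIPC) (a sketch of
	 * assumed usage, not code taken from this driver).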
*/ if ((error = bhnd_register_provider(dev, BHND_SERVICE_CHIPC))) goto failed; if ((error = bus_generic_attach(dev))) goto failed; return (0); failed: device_delete_children(sc->dev); if (sc->core_region != NULL) { chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); } chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (error); } static int chipc_detach(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); if ((error = bus_generic_detach(dev))) return (error); if ((error = device_delete_children(dev))) return (error); if ((error = bhnd_deregister_provider(dev, BHND_SERVICE_ANY))) return (error); chipc_release_region(sc, sc->core_region, RF_ALLOCATED|RF_ACTIVE); chipc_free_rman(sc); CHIPC_LOCK_DESTROY(sc); return (0); } static int chipc_add_children(struct chipc_softc *sc) { device_t child; const char *flash_bus; int error; /* SPROM/OTP */ if (sc->caps.nvram_src == BHND_NVRAM_SRC_SPROM || sc->caps.nvram_src == BHND_NVRAM_SRC_OTP) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_nvram", -1); if (child == NULL) { device_printf(sc->dev, "failed to add nvram device\n"); return (ENXIO); } /* Both OTP and external SPROM are mapped at CHIPC_SPROM_OTP */ error = chipc_set_mem_resource(sc, child, 0, CHIPC_SPROM_OTP, CHIPC_SPROM_OTP_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set OTP memory " "resource: %d\n", error); return (error); } } /* * PMU/PWR_CTRL * * On AOB ("Always on Bus") devices, the PMU core (if it exists) is * attached directly to the bhnd(4) bus -- not chipc. */ if (sc->caps.pmu && !sc->caps.aob) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pmu", -1); if (child == NULL) { device_printf(sc->dev, "failed to add pmu\n"); return (ENXIO); } } else if (sc->caps.pwr_ctrl) { child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pwrctl", -1); if (child == NULL) { device_printf(sc->dev, "failed to add pwrctl\n"); return (ENXIO); } } /* GPIO */ child = BUS_ADD_CHILD(sc->dev, 0, "gpio", -1); if (child == NULL) { device_printf(sc->dev, "failed to add gpio\n"); return (ENXIO); } error = chipc_set_mem_resource(sc, child, 0, 0, RM_MAX_END, 0, 0); if (error) { device_printf(sc->dev, "failed to set gpio memory resource: " "%d\n", error); return (error); } /* All remaining devices are SoC-only */ if (bhnd_get_attach_type(sc->dev) != BHND_ATTACH_NATIVE) return (0); /* UARTs */ for (u_int i = 0; i < min(sc->caps.num_uarts, CHIPC_UART_MAX); i++) { int irq_rid, mem_rid; irq_rid = 0; mem_rid = 0; child = BUS_ADD_CHILD(sc->dev, 0, "uart", -1); if (child == NULL) { device_printf(sc->dev, "failed to add uart%u\n", i); return (ENXIO); } /* Shared IRQ */ error = chipc_set_irq_resource(sc, child, irq_rid, 0); if (error) { device_printf(sc->dev, "failed to set uart%u irq %u\n", i, 0); return (error); } /* UART registers are mapped sequentially */ error = chipc_set_mem_resource(sc, child, mem_rid, CHIPC_UART(i), CHIPC_UART_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set uart%u memory " "resource: %d\n", i, error); return (error); } } /* Flash */ flash_bus = chipc_flash_bus_name(sc->caps.flash_type); if (flash_bus != NULL) { int rid; child = BUS_ADD_CHILD(sc->dev, 0, flash_bus, -1); if (child == NULL) { device_printf(sc->dev, "failed to add %s device\n", flash_bus); return (ENXIO); } /* flash memory mapping */ rid = 0; error = chipc_set_mem_resource(sc, child, rid, 0, RM_MAX_END, 1, 1); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } /* flashctrl registers */ rid++; error = chipc_set_mem_resource(sc, 
child, rid, CHIPC_SFLASH_BASE, CHIPC_SFLASH_SIZE, 0, 0); if (error) { device_printf(sc->dev, "failed to set flash memory " "resource %d: %d\n", rid, error); return (error); } } return (0); } /** * Determine the NVRAM data source for this device. * * The SPROM, OTP, and flash capability flags must be fully populated in * @p caps. * * @param sc chipc driver state. * @param caps capability flags to be used to derive NVRAM configuration. */ static bhnd_nvram_src chipc_find_nvram_src(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t otp_st, srom_ctrl; /* * We check for hardware presence in order of precedence. For example, * SPROM is is always used in preference to internal OTP if found. */ if (CHIPC_QUIRK(sc, SUPPORTS_SPROM) && caps->sprom) { srom_ctrl = bhnd_bus_read_4(sc->core, CHIPC_SPROM_CTRL); if (srom_ctrl & CHIPC_SRC_PRESENT) return (BHND_NVRAM_SRC_SPROM); } /* Check for programmed OTP H/W subregion (contains SROM data) */ if (CHIPC_QUIRK(sc, SUPPORTS_OTP) && caps->otp_size > 0) { /* TODO: need access to HND-OTP device */ if (!CHIPC_QUIRK(sc, OTP_HND)) { device_printf(sc->dev, "NVRAM unavailable: unsupported OTP controller.\n"); return (BHND_NVRAM_SRC_UNKNOWN); } otp_st = bhnd_bus_read_4(sc->core, CHIPC_OTPST); if (otp_st & CHIPC_OTPS_GUP_HW) return (BHND_NVRAM_SRC_OTP); } /* Check for flash */ if (caps->flash_type != CHIPC_FLASH_NONE) return (BHND_NVRAM_SRC_FLASH); /* No NVRAM hardware capability declared */ return (BHND_NVRAM_SRC_UNKNOWN); } /* Read and parse chipc capabilities */ static int chipc_read_caps(struct chipc_softc *sc, struct chipc_caps *caps) { uint32_t cap_reg; uint32_t cap_ext_reg; uint32_t regval; /* Fetch cap registers */ cap_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES); cap_ext_reg = 0; if (CHIPC_QUIRK(sc, SUPPORTS_CAP_EXT)) cap_ext_reg = bhnd_bus_read_4(sc->core, CHIPC_CAPABILITIES_EXT); /* Extract values */ caps->num_uarts = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_NUM_UART); caps->mipseb = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_MIPSEB); caps->uart_gpio = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_UARTGPIO); caps->uart_clock = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_UCLKSEL); caps->extbus_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_EXTBUS); caps->pwr_ctrl = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PWR_CTL); caps->jtag_master = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_JTAGP); caps->pll_type = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_PLL); caps->backplane_64 = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_BKPLN64); caps->boot_rom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ROM); caps->pmu = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_PMU); caps->eci = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_ECI); caps->sprom = CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_SPROM); caps->otp_size = CHIPC_GET_BITS(cap_reg, CHIPC_CAP_OTP_SIZE); caps->seci = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_SECI); caps->gsio = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_GSIO); caps->aob = CHIPC_GET_FLAG(cap_ext_reg, CHIPC_CAP2_AOB); /* Fetch OTP size for later IPX controller revisions */ if (CHIPC_QUIRK(sc, IPX_OTPL_SIZE)) { regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->otp_size = CHIPC_GET_BITS(regval, CHIPC_OTPL_SIZE); } /* Determine flash type and parameters */ caps->cfi_width = 0; switch (CHIPC_GET_BITS(cap_reg, CHIPC_CAP_FLASH)) { case CHIPC_CAP_SFLASH_ST: caps->flash_type = CHIPC_SFLASH_ST; break; case CHIPC_CAP_SFLASH_AT: caps->flash_type = CHIPC_SFLASH_AT; break; case CHIPC_CAP_NFLASH: /* unimplemented */ caps->flash_type = CHIPC_NFLASH; break; case CHIPC_CAP_PFLASH: caps->flash_type = CHIPC_PFLASH_CFI; /* determine cfi width */ regval = bhnd_bus_read_4(sc->core, 
CHIPC_FLASH_CFG); if (CHIPC_GET_FLAG(regval, CHIPC_FLASH_CFG_DS)) caps->cfi_width = 2; else caps->cfi_width = 1; break; case CHIPC_CAP_FLASH_NONE: caps->flash_type = CHIPC_FLASH_NONE; break; } /* Handle 4706_NFLASH fallback */ if (CHIPC_QUIRK(sc, 4706_NFLASH) && CHIPC_GET_FLAG(cap_reg, CHIPC_CAP_4706_NFLASH)) { caps->flash_type = CHIPC_NFLASH_4706; } /* Determine NVRAM source. Must occur after the SPROM/OTP/flash * capability flags have been populated. */ caps->nvram_src = chipc_find_nvram_src(sc, caps); /* Determine the SPROM offset within OTP (if any). SPROM-formatted * data is placed within the OTP general use region. */ caps->sprom_offset = 0; if (caps->nvram_src == BHND_NVRAM_SRC_OTP) { CHIPC_ASSERT_QUIRK(sc, OTP_IPX); /* Bit offset to GUP HW subregion containing SPROM data */ regval = bhnd_bus_read_4(sc->core, CHIPC_OTPLAYOUT); caps->sprom_offset = CHIPC_GET_BITS(regval, CHIPC_OTPL_GUP); /* Convert to bytes */ caps->sprom_offset /= 8; } return (0); } static int chipc_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int chipc_resume(device_t dev) { return (bus_generic_resume(dev)); } static void chipc_probe_nomatch(device_t dev, device_t child) { struct resource_list *rl; const char *name; name = device_get_name(child); if (name == NULL) name = "unknown device"; device_printf(dev, "<%s> at", name); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } printf(" (no driver attached)\n"); } static int chipc_print_child(device_t dev, device_t child) { struct resource_list *rl; int retval = 0; retval += bus_print_child_header(dev, child); rl = BUS_GET_RESOURCE_LIST(dev, child); if (rl != NULL) { retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); } retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } -static int -chipc_child_pnpinfo_str(device_t dev, device_t child, char *buf, - size_t buflen) -{ - if (buflen == 0) - return (EOVERFLOW); - - *buf = '\0'; - return (0); -} - -static int -chipc_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) -{ - if (buflen == 0) - return (EOVERFLOW); - - *buf = '\0'; - return (ENXIO); -} - static device_t chipc_add_child(device_t dev, u_int order, const char *name, int unit) { struct chipc_softc *sc; struct chipc_devinfo *dinfo; device_t child; sc = device_get_softc(dev); child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(struct chipc_devinfo), M_BHND, M_NOWAIT); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } resource_list_init(&dinfo->resources); dinfo->irq_mapped = false; device_set_ivars(child, dinfo); return (child); } static void chipc_child_deleted(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); if (dinfo != NULL) { /* Free the child's resource list */ resource_list_free(&dinfo->resources); /* Unmap the child's IRQ */ if (dinfo->irq_mapped) { bhnd_unmap_intr(dev, dinfo->irq); dinfo->irq_mapped = false; } free(dinfo, M_BHND); } device_set_ivars(child, NULL); } static struct resource_list * chipc_get_resource_list(device_t dev, device_t child) { struct chipc_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } /* Allocate region records for the given port, and add the port's memory * range to the 
mem_rman */ static int chipc_rman_init_regions (struct chipc_softc *sc, bhnd_port_type type, u_int port) { struct chipc_region *cr; rman_res_t start, end; u_int num_regions; int error; num_regions = bhnd_get_region_count(sc->dev, type, port); for (u_int region = 0; region < num_regions; region++) { /* Allocate new region record */ cr = chipc_alloc_region(sc, type, port, region); if (cr == NULL) return (ENODEV); /* Can't manage regions that cannot be allocated */ if (cr->cr_rid < 0) { BHND_DEBUG_DEV(sc->dev, "no rid for chipc region " "%s%u.%u", bhnd_port_type_name(type), port, region); chipc_free_region(sc, cr); continue; } /* Add to rman's managed range */ start = cr->cr_addr; end = cr->cr_end; if ((error = rman_manage_region(&sc->mem_rman, start, end))) { chipc_free_region(sc, cr); return (error); } /* Add to region list */ STAILQ_INSERT_TAIL(&sc->mem_regions, cr, cr_link); } return (0); } /* Initialize memory state for all chipc port regions */ static int chipc_init_rman(struct chipc_softc *sc) { u_int num_ports; int error; /* Port types for which we'll register chipc_region mappings */ bhnd_port_type types[] = { BHND_PORT_DEVICE }; /* Initialize resource manager */ sc->mem_rman.rm_start = 0; sc->mem_rman.rm_end = BUS_SPACE_MAXADDR; sc->mem_rman.rm_type = RMAN_ARRAY; sc->mem_rman.rm_descr = "ChipCommon Device Memory"; if ((error = rman_init(&sc->mem_rman))) { device_printf(sc->dev, "could not initialize mem_rman: %d\n", error); return (error); } /* Populate per-port-region state */ for (u_int i = 0; i < nitems(types); i++) { num_ports = bhnd_get_port_count(sc->dev, types[i]); for (u_int port = 0; port < num_ports; port++) { error = chipc_rman_init_regions(sc, types[i], port); if (error) { device_printf(sc->dev, "region init failed for %s%u: %d\n", bhnd_port_type_name(types[i]), port, error); goto failed; } } } return (0); failed: chipc_free_rman(sc); return (error); } /* Free memory management state */ static void chipc_free_rman(struct chipc_softc *sc) { struct chipc_region *cr, *cr_next; STAILQ_FOREACH_SAFE(cr, &sc->mem_regions, cr_link, cr_next) chipc_free_region(sc, cr); rman_fini(&sc->mem_rman); } /** * Return the rman instance for a given resource @p type, if any. * * @param sc The chipc device state. * @param type The resource type (e.g. SYS_RES_MEMORY, SYS_RES_IRQ, ...) */ static struct rman * chipc_get_rman(struct chipc_softc *sc, int type) { switch (type) { case SYS_RES_MEMORY: return (&sc->mem_rman); case SYS_RES_IRQ: /* We delegate IRQ resource management to the parent bus */ return (NULL); default: return (NULL); }; } static struct resource * chipc_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct chipc_softc *sc; struct chipc_region *cr; struct resource_list_entry *rle; struct resource *rv; struct rman *rm; int error; bool passthrough, isdefault; sc = device_get_softc(dev); passthrough = (device_get_parent(child) != dev); isdefault = RMAN_IS_DEFAULT_RANGE(start, end); rle = NULL; /* Fetch the resource manager, delegate request if necessary */ rm = chipc_get_rman(sc, type); if (rm == NULL) { /* Requested resource type is delegated to our parent */ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Populate defaults */ if (!passthrough && isdefault) { /* Fetch the resource list entry. 
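		 * The default start/end/count values originate from the
		 * entries installed via chipc_set_mem_resource() when the
		 * child was added.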
*/ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, *rid); if (rle == NULL) { device_printf(dev, "default resource %#x type %d for child %s " "not found\n", *rid, type, device_get_nameunit(child)); return (NULL); } if (rle->res != NULL) { device_printf(dev, "resource entry %#x type %d for child %s is busy " "[%d]\n", *rid, type, device_get_nameunit(child), rman_get_flags(rle->res)); return (NULL); } start = rle->start; end = rle->end; count = ulmax(count, rle->count); } /* Locate a mapping region */ if ((cr = chipc_find_region(sc, start, end)) == NULL) { /* Resource requests outside our shared port regions can be * delegated to our parent. */ rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* * As a special case, children that map the complete ChipCommon register * block are delegated to our parent. * * The rman API does not support sharing resources that are not * identical in size; since we allocate subregions to various children, * any children that need to map the entire register block (e.g. because * they require access to discontiguous register ranges) must make the * allocation through our parent, where we hold a compatible * RF_SHAREABLE allocation. */ if (cr == sc->core_region && cr->cr_addr == start && cr->cr_end == end && cr->cr_count == count) { rv = bus_generic_rl_alloc_resource(dev, child, type, rid, start, end, count, flags); return (rv); } /* Try to retain a region reference */ if ((error = chipc_retain_region(sc, cr, RF_ALLOCATED))) return (NULL); /* Make our rman reservation */ rv = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE, child); if (rv == NULL) { chipc_release_region(sc, cr, RF_ALLOCATED); return (NULL); } rman_set_rid(rv, *rid); /* Activate */ if (flags & RF_ACTIVE) { error = bus_activate_resource(child, type, *rid, rv); if (error) { device_printf(dev, "failed to activate entry %#x type %d for " "child %s: %d\n", *rid, type, device_get_nameunit(child), error); chipc_release_region(sc, cr, RF_ALLOCATED); rman_release_resource(rv); return (NULL); } } /* Update child's resource list entry */ if (rle != NULL) { rle->res = rv; rle->start = rman_get_start(rv); rle->end = rman_get_end(rv); rle->count = rman_get_size(rv); } return (rv); } static int chipc_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; struct resource_list_entry *rle; int error; sc = device_get_softc(dev); /* Handled by parent bus? */ rm = chipc_get_rman(sc, type); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_rl_release_resource(dev, child, type, rid, r)); } /* Locate the mapping region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Deactivate resources */ if (rman_get_flags(r) & RF_ACTIVE) { error = BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, r); if (error) return (error); } if ((error = rman_release_resource(r))) return (error); /* Drop allocation reference */ chipc_release_region(sc, cr, RF_ALLOCATED); /* Clear reference from the resource list entry if exists */ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child), type, rid); if (rle != NULL) rle->res = NULL; return (0); } static int chipc_adjust_resource(device_t dev, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; sc = device_get_softc(dev); /* Handled by parent bus? 
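	 * (i.e. resource types for which we have no local rman, or
	 * resources that were not allocated from our rman)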
*/ rm = chipc_get_rman(sc, type); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_adjust_resource(dev, child, type, r, start, end)); } /* The range is limited to the existing region mapping */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); if (end <= start) return (EINVAL); if (start < cr->cr_addr || end > cr->cr_end) return (EINVAL); /* Range falls within the existing region */ return (rman_adjust_resource(r, start, end)); } /** * Retain an RF_ACTIVE reference to the region mapping @p r, and * configure @p r with its subregion values. * * @param sc Driver instance state. * @param child Requesting child device. * @param type resource type of @p r. * @param rid resource id of @p r * @param r resource to be activated. * @param req_direct If true, failure to allocate a direct bhnd resource * will be treated as an error. If false, the resource will not be marked * as RF_ACTIVE if bhnd direct resource allocation fails. */ static int chipc_try_activate_resource(struct chipc_softc *sc, device_t child, int type, int rid, struct resource *r, bool req_direct) { struct rman *rm; struct chipc_region *cr; bhnd_size_t cr_offset; rman_res_t r_start, r_end, r_size; int error; rm = chipc_get_rman(sc, type); if (rm == NULL || !rman_is_region_manager(r, rm)) return (EINVAL); r_start = rman_get_start(r); r_end = rman_get_end(r); r_size = rman_get_size(r); /* Find the corresponding chipc region */ cr = chipc_find_region(sc, r_start, r_end); if (cr == NULL) return (EINVAL); /* Calculate subregion offset within the chipc region */ cr_offset = r_start - cr->cr_addr; /* Retain (and activate, if necessary) the chipc region */ if ((error = chipc_retain_region(sc, cr, RF_ACTIVE))) return (error); /* Configure child resource with its subregion values. */ if (cr->cr_res->direct) { error = chipc_init_child_resource(r, cr->cr_res->res, cr_offset, r_size); if (error) goto cleanup; /* Mark active */ if ((error = rman_activate_resource(r))) goto cleanup; } else if (req_direct) { error = ENOMEM; goto cleanup; } return (0); cleanup: chipc_release_region(sc, cr, RF_ACTIVE); return (error); } static int chipc_activate_bhnd_resource(device_t dev, device_t child, int type, int rid, struct bhnd_resource *r) { struct chipc_softc *sc; struct rman *rm; int error; sc = device_get_softc(dev); /* Delegate non-locally managed resources to parent */ rm = chipc_get_rman(sc, type); if (rm == NULL || !rman_is_region_manager(r->res, rm)) { return (bhnd_bus_generic_activate_resource(dev, child, type, rid, r)); } /* Try activating the chipc region resource */ error = chipc_try_activate_resource(sc, child, type, rid, r->res, false); if (error) return (error); /* Mark the child resource as direct according to the returned resource * state */ if (rman_get_flags(r->res) & RF_ACTIVE) r->direct = true; return (0); } static int chipc_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct chipc_softc *sc; struct rman *rm; sc = device_get_softc(dev); /* Delegate non-locally managed resources to parent */ rm = chipc_get_rman(sc, type); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_activate_resource(dev, child, type, rid, r)); } /* Try activating the chipc region-based resource */ return (chipc_try_activate_resource(sc, child, type, rid, r, true)); } /** * Default bhndb(4) implementation of BUS_DEACTIVATE_RESOURCE(). 
*/ static int chipc_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct chipc_softc *sc; struct chipc_region *cr; struct rman *rm; int error; sc = device_get_softc(dev); /* Handled by parent bus? */ rm = chipc_get_rman(sc, type); if (rm == NULL || !rman_is_region_manager(r, rm)) { return (bus_generic_deactivate_resource(dev, child, type, rid, r)); } /* Find the corresponding chipc region */ cr = chipc_find_region(sc, rman_get_start(r), rman_get_end(r)); if (cr == NULL) return (EINVAL); /* Mark inactive */ if ((error = rman_deactivate_resource(r))) return (error); /* Drop associated RF_ACTIVE reference */ chipc_release_region(sc, cr, RF_ACTIVE); return (0); } /** * Examine bus state and make a best effort determination of whether it's * likely safe to enable the muxed SPROM pins. * * On devices that do not use SPROM pin muxing, always returns true. * * @param sc chipc driver state. */ static bool chipc_should_enable_muxed_sprom(struct chipc_softc *sc) { device_t *devs; device_t hostb; device_t parent; int devcount; int error; bool result; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (true); mtx_lock(&Giant); /* for newbus */ parent = device_get_parent(sc->dev); hostb = bhnd_bus_find_hostb_device(parent); if ((error = device_get_children(parent, &devs, &devcount))) { mtx_unlock(&Giant); return (false); } /* Reject any active devices other than ChipCommon, or the * host bridge (if any). */ result = true; for (int i = 0; i < devcount; i++) { if (devs[i] == hostb || devs[i] == sc->dev) continue; if (!device_is_attached(devs[i])) continue; if (device_is_suspended(devs[i])) continue; /* Active device; assume SPROM is busy */ result = false; break; } free(devs, M_TEMP); mtx_unlock(&Giant); return (result); } static int chipc_enable_sprom(device_t dev) { struct chipc_softc *sc; int error; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Already enabled? */ if (sc->sprom_refcnt >= 1) { sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (0); } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: error = chipc_enable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: error = chipc_enable_otp_power(sc); break; default: error = 0; break; } /* Bump the reference count */ if (error == 0) sc->sprom_refcnt++; CHIPC_UNLOCK(sc); return (error); } static void chipc_disable_sprom(device_t dev) { struct chipc_softc *sc; sc = device_get_softc(dev); CHIPC_LOCK(sc); /* Check reference count, skip disable if in-use. */ KASSERT(sc->sprom_refcnt > 0, ("sprom refcnt overrelease")); sc->sprom_refcnt--; if (sc->sprom_refcnt > 0) { CHIPC_UNLOCK(sc); return; } switch (sc->caps.nvram_src) { case BHND_NVRAM_SRC_SPROM: chipc_disable_sprom_pins(sc); break; case BHND_NVRAM_SRC_OTP: chipc_disable_otp_power(sc); break; default: break; } CHIPC_UNLOCK(sc); } static int chipc_enable_otp_power(struct chipc_softc *sc) { // TODO: Enable OTP resource via PMU, and wait up to 100 usec for // OTPS_READY to be set in `optstatus`. return (0); } static void chipc_disable_otp_power(struct chipc_softc *sc) { // TODO: Disable OTP resource via PMU } /** * If required by this device, enable access to the SPROM. * * @param sc chipc driver state. */ static int chipc_enable_sprom_pins(struct chipc_softc *sc) { uint32_t cctrl; CHIPC_LOCK_ASSERT(sc, MA_OWNED); KASSERT(sc->sprom_refcnt == 0, ("sprom pins already enabled")); /* Nothing to do? 
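	 * (SPROM pin muxing is only required on devices with a MUX_SPROM
	 * quirk)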
/**
 * If required by this device, enable access to the SPROM.
 *
 * @param sc chipc driver state.
 */
static int
chipc_enable_sprom_pins(struct chipc_softc *sc)
{
	uint32_t cctrl;

	CHIPC_LOCK_ASSERT(sc, MA_OWNED);
	KASSERT(sc->sprom_refcnt == 0, ("sprom pins already enabled"));

	/* Nothing to do? */
	if (!CHIPC_QUIRK(sc, MUX_SPROM))
		return (0);

	/* Check whether bus is busy */
	if (!chipc_should_enable_muxed_sprom(sc))
		return (EBUSY);

	cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL);

	/* 4331 devices */
	if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) {
		cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN;

		if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM))
			cctrl &= ~CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5;

		if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM))
			cctrl &= ~CHIPC_CCTRL4331_EXTPA_EN2;

		bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl);
		return (0);
	}

	/* 4360 devices */
	if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) {
		/* Unimplemented */
	}

	/* Refuse to proceed on unsupported devices with muxed SPROM pins */
	device_printf(sc->dev, "muxed sprom lines on unrecognized device\n");
	return (ENXIO);
}

/**
 * If required by this device, revert any GPIO/pin configuration applied
 * to allow SPROM access.
 *
 * @param sc chipc driver state.
 */
static void
chipc_disable_sprom_pins(struct chipc_softc *sc)
{
	uint32_t cctrl;

	/* Nothing to do? */
	if (!CHIPC_QUIRK(sc, MUX_SPROM))
		return;

	CHIPC_LOCK_ASSERT(sc, MA_OWNED);
	KASSERT(sc->sprom_refcnt == 0, ("sprom pins in use"));

	cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL);

	/* 4331 devices */
	if (CHIPC_QUIRK(sc, 4331_EXTPA_MUX_SPROM)) {
		cctrl |= CHIPC_CCTRL4331_EXTPA_EN;

		if (CHIPC_QUIRK(sc, 4331_GPIO2_5_MUX_SPROM))
			cctrl |= CHIPC_CCTRL4331_EXTPA_ON_GPIO2_5;

		if (CHIPC_QUIRK(sc, 4331_EXTPA2_MUX_SPROM))
			cctrl |= CHIPC_CCTRL4331_EXTPA_EN2;

		bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl);
		return;
	}

	/* 4360 devices */
	if (CHIPC_QUIRK(sc, 4360_FEM_MUX_SPROM)) {
		/* Unimplemented */
	}
}

static uint32_t
chipc_read_chipst(device_t dev)
{
	struct chipc_softc *sc = device_get_softc(dev);
	return (bhnd_bus_read_4(sc->core, CHIPC_CHIPST));
}

static void
chipc_write_chipctrl(device_t dev, uint32_t value, uint32_t mask)
{
	struct chipc_softc	*sc;
	uint32_t		 cctrl;

	sc = device_get_softc(dev);

	CHIPC_LOCK(sc);

	cctrl = bhnd_bus_read_4(sc->core, CHIPC_CHIPCTRL);
	/* Update only the bits selected by `mask` with those of `value` */
	cctrl = (cctrl & ~mask) | (value & mask);
	bhnd_bus_write_4(sc->core, CHIPC_CHIPCTRL, cctrl);

	CHIPC_UNLOCK(sc);
}

static struct chipc_caps *
chipc_get_caps(device_t dev)
{
	struct chipc_softc	*sc;

	sc = device_get_softc(dev);
	return (&sc->caps);
}
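/*
 * Illustrative call (not in the original source): update two chipctrl bits
 * through the masked read-modify-write above.  The flag names are
 * hypothetical; BHND_CHIPC_WRITE_CHIPCTRL() is the generated kobj wrapper
 * that dispatches to chipc_write_chipctrl().
 */
#if 0
	/* Set FLAG_A, clear FLAG_B, leave all other bits untouched */
	BHND_CHIPC_WRITE_CHIPCTRL(chipc_dev, EXAMPLE_FLAG_A,
	    EXAMPLE_FLAG_A | EXAMPLE_FLAG_B);
#endif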
static device_method_t chipc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			chipc_probe),
	DEVMETHOD(device_attach,		chipc_attach),
	DEVMETHOD(device_detach,		chipc_detach),
	DEVMETHOD(device_suspend,		chipc_suspend),
	DEVMETHOD(device_resume,		chipc_resume),

	/* Bus interface */
	DEVMETHOD(bus_probe_nomatch,		chipc_probe_nomatch),
	DEVMETHOD(bus_print_child,		chipc_print_child),
-	DEVMETHOD(bus_child_pnpinfo_str,	chipc_child_pnpinfo_str),
-	DEVMETHOD(bus_child_location_str,	chipc_child_location_str),
	DEVMETHOD(bus_add_child,		chipc_add_child),
	DEVMETHOD(bus_child_deleted,		chipc_child_deleted),
	DEVMETHOD(bus_set_resource,		bus_generic_rl_set_resource),
	DEVMETHOD(bus_get_resource,		bus_generic_rl_get_resource),
	DEVMETHOD(bus_delete_resource,		bus_generic_rl_delete_resource),
	DEVMETHOD(bus_alloc_resource,		chipc_alloc_resource),
	DEVMETHOD(bus_release_resource,		chipc_release_resource),
	DEVMETHOD(bus_adjust_resource,		chipc_adjust_resource),
	DEVMETHOD(bus_activate_resource,	chipc_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	chipc_deactivate_resource),
	DEVMETHOD(bus_get_resource_list,	chipc_get_resource_list),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),
	DEVMETHOD(bus_config_intr,		bus_generic_config_intr),
	DEVMETHOD(bus_bind_intr,		bus_generic_bind_intr),
	DEVMETHOD(bus_describe_intr,		bus_generic_describe_intr),

	/* BHND bus interface */
	DEVMETHOD(bhnd_bus_activate_resource,	chipc_activate_bhnd_resource),

	/* ChipCommon interface */
	DEVMETHOD(bhnd_chipc_read_chipst,	chipc_read_chipst),
	DEVMETHOD(bhnd_chipc_write_chipctrl,	chipc_write_chipctrl),
	DEVMETHOD(bhnd_chipc_enable_sprom,	chipc_enable_sprom),
	DEVMETHOD(bhnd_chipc_disable_sprom,	chipc_disable_sprom),
	DEVMETHOD(bhnd_chipc_get_caps,		chipc_get_caps),

	DEVMETHOD_END
};

DEFINE_CLASS_0(bhnd_chipc, bhnd_chipc_driver, chipc_methods,
    sizeof(struct chipc_softc));
EARLY_DRIVER_MODULE(bhnd_chipc, bhnd, bhnd_chipc_driver,
    bhnd_chipc_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
MODULE_DEPEND(bhnd_chipc, bhnd, 1, 1, 1);
MODULE_VERSION(bhnd_chipc, 1);
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 650300c805a1..c5a3e6dd200e 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -1,13108 +1,13107 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_inet.h" #include "opt_inet6.h" #include "opt_kern_tls.h" #include "opt_ratelimit.h" #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #endif #include #include #ifdef KERN_TLS #include #endif #if defined(__i386__) || defined(__amd64__) #include #include #include #include #endif #ifdef DDB #include #include #endif #include "common/common.h" #include "common/t4_msg.h" #include "common/t4_regs.h" #include "common/t4_regs_values.h" #include "cudbg/cudbg.h" #include "t4_clip.h" #include "t4_ioctl.h" #include "t4_l2t.h" #include "t4_mp_ring.h" #include "t4_if.h" #include "t4_smt.h" /* T4 bus driver interface */ static int t4_probe(device_t); static int t4_attach(device_t); static int t4_detach(device_t); -static int t4_child_location_str(device_t, device_t, char *, size_t); +static int t4_child_location(device_t, device_t, struct sbuf *); static int t4_ready(device_t); static int t4_read_port_device(device_t, int, device_t *); static int t4_suspend(device_t); static int t4_resume(device_t); static int t4_reset_prepare(device_t, device_t); static int t4_reset_post(device_t, device_t); static device_method_t t4_methods[] = { DEVMETHOD(device_probe, t4_probe), DEVMETHOD(device_attach, t4_attach), DEVMETHOD(device_detach, t4_detach), DEVMETHOD(device_suspend, t4_suspend), DEVMETHOD(device_resume, t4_resume), - DEVMETHOD(bus_child_location_str, t4_child_location_str), + DEVMETHOD(bus_child_location, t4_child_location), DEVMETHOD(bus_reset_prepare, t4_reset_prepare), DEVMETHOD(bus_reset_post, t4_reset_post), DEVMETHOD(t4_is_main_ready, t4_ready), DEVMETHOD(t4_read_port_device, t4_read_port_device), DEVMETHOD_END }; static driver_t t4_driver = { "t4nex", t4_methods, sizeof(struct adapter) }; /* T4 port (cxgbe) interface */ static int cxgbe_probe(device_t); static int cxgbe_attach(device_t); static int cxgbe_detach(device_t); device_method_t cxgbe_methods[] = { DEVMETHOD(device_probe, cxgbe_probe), DEVMETHOD(device_attach, cxgbe_attach), DEVMETHOD(device_detach, cxgbe_detach), { 0, 0 } }; static driver_t cxgbe_driver = { "cxgbe", cxgbe_methods, sizeof(struct port_info) }; /* T4 VI (vcxgbe) interface */ static int vcxgbe_probe(device_t); static int vcxgbe_attach(device_t); static int vcxgbe_detach(device_t); static device_method_t vcxgbe_methods[] = { DEVMETHOD(device_probe, vcxgbe_probe), DEVMETHOD(device_attach, vcxgbe_attach), DEVMETHOD(device_detach, vcxgbe_detach), { 0, 0 } }; static driver_t vcxgbe_driver = { "vcxgbe", vcxgbe_methods, sizeof(struct vi_info) }; static d_ioctl_t t4_ioctl; static struct cdevsw t4_cdevsw = { .d_version = D_VERSION, .d_ioctl = t4_ioctl, .d_name = "t4nex", }; /* T5 bus driver interface */ static int t5_probe(device_t); static device_method_t t5_methods[] = { DEVMETHOD(device_probe, t5_probe), DEVMETHOD(device_attach, t4_attach), DEVMETHOD(device_detach, t4_detach), DEVMETHOD(device_suspend, t4_suspend), DEVMETHOD(device_resume, t4_resume), - DEVMETHOD(bus_child_location_str, t4_child_location_str), + DEVMETHOD(bus_child_location, t4_child_location), DEVMETHOD(bus_reset_prepare, t4_reset_prepare), DEVMETHOD(bus_reset_post, t4_reset_post), DEVMETHOD(t4_is_main_ready, t4_ready), DEVMETHOD(t4_read_port_device, t4_read_port_device), DEVMETHOD_END }; static driver_t t5_driver = { "t5nex", 
t5_methods, sizeof(struct adapter) }; /* T5 port (cxl) interface */ static driver_t cxl_driver = { "cxl", cxgbe_methods, sizeof(struct port_info) }; /* T5 VI (vcxl) interface */ static driver_t vcxl_driver = { "vcxl", vcxgbe_methods, sizeof(struct vi_info) }; /* T6 bus driver interface */ static int t6_probe(device_t); static device_method_t t6_methods[] = { DEVMETHOD(device_probe, t6_probe), DEVMETHOD(device_attach, t4_attach), DEVMETHOD(device_detach, t4_detach), DEVMETHOD(device_suspend, t4_suspend), DEVMETHOD(device_resume, t4_resume), - DEVMETHOD(bus_child_location_str, t4_child_location_str), + DEVMETHOD(bus_child_location, t4_child_location), DEVMETHOD(bus_reset_prepare, t4_reset_prepare), DEVMETHOD(bus_reset_post, t4_reset_post), DEVMETHOD(t4_is_main_ready, t4_ready), DEVMETHOD(t4_read_port_device, t4_read_port_device), DEVMETHOD_END }; static driver_t t6_driver = { "t6nex", t6_methods, sizeof(struct adapter) }; /* T6 port (cc) interface */ static driver_t cc_driver = { "cc", cxgbe_methods, sizeof(struct port_info) }; /* T6 VI (vcc) interface */ static driver_t vcc_driver = { "vcc", vcxgbe_methods, sizeof(struct vi_info) }; /* ifnet interface */ static void cxgbe_init(void *); static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t); static int cxgbe_transmit(struct ifnet *, struct mbuf *); static void cxgbe_qflush(struct ifnet *); #if defined(KERN_TLS) || defined(RATELIMIT) static int cxgbe_snd_tag_alloc(struct ifnet *, union if_snd_tag_alloc_params *, struct m_snd_tag **); static int cxgbe_snd_tag_modify(struct m_snd_tag *, union if_snd_tag_modify_params *); static int cxgbe_snd_tag_query(struct m_snd_tag *, union if_snd_tag_query_params *); static void cxgbe_snd_tag_free(struct m_snd_tag *); #endif MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services"); /* * Correct lock order when you need to acquire multiple locks is t4_list_lock, * then ADAPTER_LOCK, then t4_uld_list_lock. */ static struct sx t4_list_lock; SLIST_HEAD(, adapter) t4_list; #ifdef TCP_OFFLOAD static struct sx t4_uld_list_lock; SLIST_HEAD(, uld_info) t4_uld_list; #endif /* * Tunables. See tweak_tunables() too. * * Each tunable is set to a default value here if it's known at compile-time. * Otherwise it is set to -n as an indication to tweak_tunables() that it should * provide a reasonable default (upto n) when the driver is loaded. * * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to * T5 are under hw.cxl. */ SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) parameters"); SYSCTL_NODE(_hw, OID_AUTO, cxl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) T5+ parameters"); SYSCTL_NODE(_hw_cxgbe, OID_AUTO, toe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) TOE parameters"); /* * Number of queues for tx and rx, NIC and offload. 
*/ #define NTXQ 16 int t4_ntxq = -NTXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq, CTLFLAG_RDTUN, &t4_ntxq, 0, "Number of TX queues per port"); TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq); /* Old name, undocumented */ #define NRXQ 8 int t4_nrxq = -NRXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq, CTLFLAG_RDTUN, &t4_nrxq, 0, "Number of RX queues per port"); TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq); /* Old name, undocumented */ #define NTXQ_VI 1 static int t4_ntxq_vi = -NTXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq_vi, CTLFLAG_RDTUN, &t4_ntxq_vi, 0, "Number of TX queues per VI"); #define NRXQ_VI 1 static int t4_nrxq_vi = -NRXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq_vi, CTLFLAG_RDTUN, &t4_nrxq_vi, 0, "Number of RX queues per VI"); static int t4_rsrv_noflowq = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, rsrv_noflowq, CTLFLAG_RDTUN, &t4_rsrv_noflowq, 0, "Reserve TX queue 0 of each VI for non-flowid packets"); #if defined(TCP_OFFLOAD) || defined(RATELIMIT) #define NOFLDTXQ 8 static int t4_nofldtxq = -NOFLDTXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq, CTLFLAG_RDTUN, &t4_nofldtxq, 0, "Number of offload TX queues per port"); #define NOFLDRXQ 2 static int t4_nofldrxq = -NOFLDRXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0, "Number of offload RX queues per port"); #define NOFLDTXQ_VI 1 static int t4_nofldtxq_vi = -NOFLDTXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq_vi, CTLFLAG_RDTUN, &t4_nofldtxq_vi, 0, "Number of offload TX queues per VI"); #define NOFLDRXQ_VI 1 static int t4_nofldrxq_vi = -NOFLDRXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq_vi, CTLFLAG_RDTUN, &t4_nofldrxq_vi, 0, "Number of offload RX queues per VI"); #define TMR_IDX_OFLD 1 int t4_tmr_idx_ofld = TMR_IDX_OFLD; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_ofld, CTLFLAG_RDTUN, &t4_tmr_idx_ofld, 0, "Holdoff timer index for offload queues"); #define PKTC_IDX_OFLD (-1) int t4_pktc_idx_ofld = PKTC_IDX_OFLD; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_ofld, CTLFLAG_RDTUN, &t4_pktc_idx_ofld, 0, "holdoff packet counter index for offload queues"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_keepalive_idle = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_idle, CTLFLAG_RDTUN, &t4_toe_keepalive_idle, 0, "TOE keepalive idle timer (us)"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_keepalive_interval = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_interval, CTLFLAG_RDTUN, &t4_toe_keepalive_interval, 0, "TOE keepalive interval timer (us)"); /* 0 means chip/fw default, non-zero number is # of keepalives before abort */ static int t4_toe_keepalive_count = 0; SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, keepalive_count, CTLFLAG_RDTUN, &t4_toe_keepalive_count, 0, "Number of TOE keepalive probes before abort"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_rexmt_min = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_min, CTLFLAG_RDTUN, &t4_toe_rexmt_min, 0, "Minimum TOE retransmit interval (us)"); /* 0 means chip/fw default, non-zero number is value in microseconds */ static u_long t4_toe_rexmt_max = 0; SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_max, CTLFLAG_RDTUN, &t4_toe_rexmt_max, 0, "Maximum TOE retransmit interval (us)"); /* 0 means chip/fw default, non-zero number is # of rexmt before abort */ static int t4_toe_rexmt_count = 0; SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, rexmt_count, CTLFLAG_RDTUN, &t4_toe_rexmt_count, 0, "Number of TOE retransmissions before abort"); /* -1 means chip/fw 
default, other values are raw backoff values to use */ static int t4_toe_rexmt_backoff[16] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; SYSCTL_NODE(_hw_cxgbe_toe, OID_AUTO, rexmt_backoff, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) TOE retransmit backoff values"); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 0, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[0], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 1, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[1], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 2, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[2], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 3, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[3], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 4, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[4], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 5, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[5], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 6, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[6], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 7, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[7], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 8, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[8], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 9, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[9], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 10, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[10], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 11, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[11], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 12, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[12], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 13, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[13], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 14, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[14], 0, ""); SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 15, CTLFLAG_RDTUN, &t4_toe_rexmt_backoff[15], 0, ""); static int t4_toe_tls_rx_timeout = 5; SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, tls_rx_timeout, CTLFLAG_RDTUN, &t4_toe_tls_rx_timeout, 0, "Timeout in seconds to downgrade TLS sockets to plain TOE"); #endif #ifdef DEV_NETMAP #define NN_MAIN_VI (1 << 0) /* Native netmap on the main VI */ #define NN_EXTRA_VI (1 << 1) /* Native netmap on the extra VI(s) */ static int t4_native_netmap = NN_EXTRA_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, native_netmap, CTLFLAG_RDTUN, &t4_native_netmap, 0, "Native netmap support. bit 0 = main VI, bit 1 = extra VIs"); #define NNMTXQ 8 static int t4_nnmtxq = -NNMTXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq, CTLFLAG_RDTUN, &t4_nnmtxq, 0, "Number of netmap TX queues"); #define NNMRXQ 8 static int t4_nnmrxq = -NNMRXQ; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq, CTLFLAG_RDTUN, &t4_nnmrxq, 0, "Number of netmap RX queues"); #define NNMTXQ_VI 2 static int t4_nnmtxq_vi = -NNMTXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq_vi, CTLFLAG_RDTUN, &t4_nnmtxq_vi, 0, "Number of netmap TX queues per VI"); #define NNMRXQ_VI 2 static int t4_nnmrxq_vi = -NNMRXQ_VI; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq_vi, CTLFLAG_RDTUN, &t4_nnmrxq_vi, 0, "Number of netmap RX queues per VI"); #endif /* * Holdoff parameters for ports. 
*/ #define TMR_IDX 1 int t4_tmr_idx = TMR_IDX; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx, CTLFLAG_RDTUN, &t4_tmr_idx, 0, "Holdoff timer index"); TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx); /* Old name */ #define PKTC_IDX (-1) int t4_pktc_idx = PKTC_IDX; SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx, CTLFLAG_RDTUN, &t4_pktc_idx, 0, "Holdoff packet counter index"); TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx); /* Old name */ /* * Size (# of entries) of each tx and rx queue. */ unsigned int t4_qsize_txq = TX_EQ_QSIZE; SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, &t4_qsize_txq, 0, "Number of descriptors in each TX queue"); unsigned int t4_qsize_rxq = RX_IQ_QSIZE; SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, &t4_qsize_rxq, 0, "Number of descriptors in each RX queue"); /* * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively). */ int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX; SYSCTL_INT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &t4_intr_types, 0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)"); /* * Configuration file. All the _CF names here are special. */ #define DEFAULT_CF "default" #define BUILTIN_CF "built-in" #define FLASH_CF "flash" #define UWIRE_CF "uwire" #define FPGA_CF "fpga" static char t4_cfg_file[32] = DEFAULT_CF; SYSCTL_STRING(_hw_cxgbe, OID_AUTO, config_file, CTLFLAG_RDTUN, t4_cfg_file, sizeof(t4_cfg_file), "Firmware configuration file"); /* * PAUSE settings (bit 0, 1, 2 = rx_pause, tx_pause, pause_autoneg respectively). * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them. * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water * mark or when signalled to do so, 0 to never emit PAUSE. * pause_autoneg = 1 means PAUSE will be negotiated if possible and the * negotiated settings will override rx_pause/tx_pause. * Otherwise rx_pause/tx_pause are applied forcibly. */ static int t4_pause_settings = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG; SYSCTL_INT(_hw_cxgbe, OID_AUTO, pause_settings, CTLFLAG_RDTUN, &t4_pause_settings, 0, "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)"); /* * Forward Error Correction settings (bit 0, 1 = RS, BASER respectively). * -1 to run with the firmware default. Same as FEC_AUTO (bit 5) * 0 to disable FEC. */ static int t4_fec = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0, "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)"); /* * Link autonegotiation. * -1 to run with the firmware default. * 0 to disable. * 1 to enable. */ static int t4_autoneg = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, autoneg, CTLFLAG_RDTUN, &t4_autoneg, 0, "Link autonegotiation"); /* * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed, * encouraged respectively). '-n' is the same as 'n' except the firmware * version used in the checks is read from the firmware bundled with the driver. */ static int t4_fw_install = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fw_install, CTLFLAG_RDTUN, &t4_fw_install, 0, "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)"); /* * ASIC features that will be used. Disable the ones you don't want so that the * chip resources aren't wasted on features that will not be used. */ static int t4_nbmcaps_allowed = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, nbmcaps_allowed, CTLFLAG_RDTUN, &t4_nbmcaps_allowed, 0, "Default NBM capabilities"); static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. 
by default */ SYSCTL_INT(_hw_cxgbe, OID_AUTO, linkcaps_allowed, CTLFLAG_RDTUN, &t4_linkcaps_allowed, 0, "Default link capabilities"); static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS | FW_CAPS_CONFIG_SWITCH_EGRESS; SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN, &t4_switchcaps_allowed, 0, "Default switch capabilities"); #ifdef RATELIMIT static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC | FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD; #else static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC | FW_CAPS_CONFIG_NIC_HASHFILTER; #endif SYSCTL_INT(_hw_cxgbe, OID_AUTO, niccaps_allowed, CTLFLAG_RDTUN, &t4_niccaps_allowed, 0, "Default NIC capabilities"); static int t4_toecaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, toecaps_allowed, CTLFLAG_RDTUN, &t4_toecaps_allowed, 0, "Default TCP offload capabilities"); static int t4_rdmacaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, rdmacaps_allowed, CTLFLAG_RDTUN, &t4_rdmacaps_allowed, 0, "Default RDMA capabilities"); static int t4_cryptocaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, cryptocaps_allowed, CTLFLAG_RDTUN, &t4_cryptocaps_allowed, 0, "Default crypto capabilities"); static int t4_iscsicaps_allowed = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, iscsicaps_allowed, CTLFLAG_RDTUN, &t4_iscsicaps_allowed, 0, "Default iSCSI capabilities"); static int t4_fcoecaps_allowed = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, fcoecaps_allowed, CTLFLAG_RDTUN, &t4_fcoecaps_allowed, 0, "Default FCoE capabilities"); static int t5_write_combine = 0; SYSCTL_INT(_hw_cxl, OID_AUTO, write_combine, CTLFLAG_RDTUN, &t5_write_combine, 0, "Use WC instead of UC for BAR2"); static int t4_num_vis = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, num_vis, CTLFLAG_RDTUN, &t4_num_vis, 0, "Number of VIs per port"); /* * PCIe Relaxed Ordering. * -1: driver should figure out a good value. * 0: disable RO. * 1: enable RO. * 2: leave RO alone. */ static int pcie_relaxed_ordering = -1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, pcie_relaxed_ordering, CTLFLAG_RDTUN, &pcie_relaxed_ordering, 0, "PCIe Relaxed Ordering: 0 = disable, 1 = enable, 2 = leave alone"); static int t4_panic_on_fatal_err = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RWTUN, &t4_panic_on_fatal_err, 0, "panic on fatal errors"); static int t4_reset_on_fatal_err = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, reset_on_fatal_err, CTLFLAG_RWTUN, &t4_reset_on_fatal_err, 0, "reset adapter on fatal errors"); static int t4_tx_vm_wr = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_vm_wr, CTLFLAG_RWTUN, &t4_tx_vm_wr, 0, "Use VM work requests to transmit packets."); /* * Set to non-zero to enable the attack filter. A packet that matches any of * these conditions will get dropped on ingress: * 1) IP && source address == destination address. * 2) TCP/IP && source address is not a unicast address. * 3) TCP/IP && destination address is not a unicast address. * 4) IP && source address is loopback (127.x.y.z). * 5) IP && destination address is loopback (127.x.y.z). * 6) IPv6 && source address == destination address. * 7) IPv6 && source address is not a unicast address. * 8) IPv6 && source address is loopback (::1/128). * 9) IPv6 && destination address is loopback (::1/128). * 10) IPv6 && source address is unspecified (::/128). * 11) IPv6 && destination address is unspecified (::/128). * 12) TCP/IPv6 && source address is multicast (ff00::/8). * 13) TCP/IPv6 && destination address is multicast (ff00::/8). 
*/ static int t4_attack_filter = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, attack_filter, CTLFLAG_RDTUN, &t4_attack_filter, 0, "Drop suspicious traffic"); static int t4_drop_ip_fragments = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_ip_fragments, CTLFLAG_RDTUN, &t4_drop_ip_fragments, 0, "Drop IP fragments"); static int t4_drop_pkts_with_l2_errors = 1; SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l2_errors, CTLFLAG_RDTUN, &t4_drop_pkts_with_l2_errors, 0, "Drop all frames with Layer 2 length or checksum errors"); static int t4_drop_pkts_with_l3_errors = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l3_errors, CTLFLAG_RDTUN, &t4_drop_pkts_with_l3_errors, 0, "Drop all frames with IP version, length, or checksum errors"); static int t4_drop_pkts_with_l4_errors = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l4_errors, CTLFLAG_RDTUN, &t4_drop_pkts_with_l4_errors, 0, "Drop all frames with Layer 4 length, checksum, or other errors"); #ifdef TCP_OFFLOAD /* * TOE tunables. */ static int t4_cop_managed_offloading = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, cop_managed_offloading, CTLFLAG_RDTUN, &t4_cop_managed_offloading, 0, "COP (Connection Offload Policy) controls all TOE offload"); #endif #ifdef KERN_TLS /* * This enables KERN_TLS for all adapters if set. */ static int t4_kern_tls = 0; SYSCTL_INT(_hw_cxgbe, OID_AUTO, kern_tls, CTLFLAG_RDTUN, &t4_kern_tls, 0, "Enable KERN_TLS mode for all supported adapters"); SYSCTL_NODE(_hw_cxgbe, OID_AUTO, tls, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "cxgbe(4) KERN_TLS parameters"); static int t4_tls_inline_keys = 0; SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, inline_keys, CTLFLAG_RDTUN, &t4_tls_inline_keys, 0, "Always pass TLS keys in work requests (1) or attempt to store TLS keys " "in card memory."); static int t4_tls_combo_wrs = 0; SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, combo_wrs, CTLFLAG_RDTUN, &t4_tls_combo_wrs, 0, "Attempt to combine TCB field updates with TLS record work requests."); #endif /* Functions used by VIs to obtain unique MAC addresses for each VI. */ static int vi_mac_funcs[] = { FW_VI_FUNC_ETH, FW_VI_FUNC_OFLD, FW_VI_FUNC_IWARP, FW_VI_FUNC_OPENISCSI, FW_VI_FUNC_OPENFCOE, FW_VI_FUNC_FOISCSI, FW_VI_FUNC_FOFCOE, }; struct intrs_and_queues { uint16_t intr_type; /* INTx, MSI, or MSI-X */ uint16_t num_vis; /* number of VIs for each port */ uint16_t nirq; /* Total # of vectors */ uint16_t ntxq; /* # of NIC txq's for each port */ uint16_t nrxq; /* # of NIC rxq's for each port */ uint16_t nofldtxq; /* # of TOE/ETHOFLD txq's for each port */ uint16_t nofldrxq; /* # of TOE rxq's for each port */ uint16_t nnmtxq; /* # of netmap txq's */ uint16_t nnmrxq; /* # of netmap rxq's */ /* The vcxgbe/vcxl interfaces use these and not the ones above. 
*/ uint16_t ntxq_vi; /* # of NIC txq's */ uint16_t nrxq_vi; /* # of NIC rxq's */ uint16_t nofldtxq_vi; /* # of TOE txq's */ uint16_t nofldrxq_vi; /* # of TOE rxq's */ uint16_t nnmtxq_vi; /* # of netmap txq's */ uint16_t nnmrxq_vi; /* # of netmap rxq's */ }; static void setup_memwin(struct adapter *); static void position_memwin(struct adapter *, int, uint32_t); static int validate_mem_range(struct adapter *, uint32_t, uint32_t); static int fwmtype_to_hwmtype(int); static int validate_mt_off_len(struct adapter *, int, uint32_t, uint32_t, uint32_t *); static int fixup_devlog_params(struct adapter *); static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *); static int contact_firmware(struct adapter *); static int partition_resources(struct adapter *); static int get_params__pre_init(struct adapter *); static int set_params__pre_init(struct adapter *); static int get_params__post_init(struct adapter *); static int set_params__post_init(struct adapter *); static void t4_set_desc(struct adapter *); static bool fixed_ifmedia(struct port_info *); static void build_medialist(struct port_info *); static void init_link_config(struct port_info *); static int fixup_link_config(struct port_info *); static int apply_link_config(struct port_info *); static int cxgbe_init_synchronized(struct vi_info *); static int cxgbe_uninit_synchronized(struct vi_info *); static int adapter_full_init(struct adapter *); static void adapter_full_uninit(struct adapter *); static int vi_full_init(struct vi_info *); static void vi_full_uninit(struct vi_info *); static int alloc_extra_vi(struct adapter *, struct port_info *, struct vi_info *); static void quiesce_txq(struct sge_txq *); static void quiesce_wrq(struct sge_wrq *); static void quiesce_iq_fl(struct adapter *, struct sge_iq *, struct sge_fl *); static void quiesce_vi(struct vi_info *); static int t4_alloc_irq(struct adapter *, struct irq *, int rid, driver_intr_t *, void *, char *); static int t4_free_irq(struct adapter *, struct irq *); static void t4_init_atid_table(struct adapter *); static void t4_free_atid_table(struct adapter *); static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *); static void vi_refresh_stats(struct vi_info *); static void cxgbe_refresh_stats(struct vi_info *); static void cxgbe_tick(void *); static void vi_tick(void *); static void cxgbe_sysctls(struct port_info *); static int sysctl_int_array(SYSCTL_HANDLER_ARGS); static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS); static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS); static int sysctl_btphy(SYSCTL_HANDLER_ARGS); static int sysctl_noflowq(SYSCTL_HANDLER_ARGS); static int sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS); static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS); static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS); static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS); static int sysctl_fec(SYSCTL_HANDLER_ARGS); static int sysctl_module_fec(SYSCTL_HANDLER_ARGS); static int sysctl_autoneg(SYSCTL_HANDLER_ARGS); static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS); static int sysctl_temperature(SYSCTL_HANDLER_ARGS); static int sysctl_vdd(SYSCTL_HANDLER_ARGS); static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS); static int sysctl_loadavg(SYSCTL_HANDLER_ARGS); static int sysctl_cctrl(SYSCTL_HANDLER_ARGS); static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS); static int sysctl_cim_la(SYSCTL_HANDLER_ARGS); static int 
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS); static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS); static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS); static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS); static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS); static int sysctl_devlog(SYSCTL_HANDLER_ARGS); static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS); static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS); static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS); static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS); static int sysctl_meminfo(SYSCTL_HANDLER_ARGS); static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS); static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS); static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS); static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS); static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tids(SYSCTL_HANDLER_ARGS); static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tnl_stats(SYSCTL_HANDLER_ARGS); static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS); static int sysctl_tp_la(SYSCTL_HANDLER_ARGS); static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS); static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS); static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS); static int sysctl_cpus(SYSCTL_HANDLER_ARGS); static int sysctl_reset(SYSCTL_HANDLER_ARGS); #ifdef TCP_OFFLOAD static int sysctl_tls(SYSCTL_HANDLER_ARGS); static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS); static int sysctl_tls_rx_timeout(SYSCTL_HANDLER_ARGS); static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS); static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS); static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS); static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS); static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS); static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS); #endif static int get_sge_context(struct adapter *, struct t4_sge_context *); static int load_fw(struct adapter *, struct t4_data *); static int load_cfg(struct adapter *, struct t4_data *); static int load_boot(struct adapter *, struct t4_bootrom *); static int load_bootcfg(struct adapter *, struct t4_data *); static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *); static void free_offload_policy(struct t4_offload_policy *); static int set_offload_policy(struct adapter *, struct t4_offload_policy *); static int read_card_mem(struct adapter *, int, struct t4_mem_range *); static int read_i2c(struct adapter *, struct t4_i2c_data *); static int clear_stats(struct adapter *, u_int); static int hold_clip_addr(struct adapter *, struct t4_clip_addr *); static int release_clip_addr(struct adapter *, struct t4_clip_addr *); #ifdef TCP_OFFLOAD static int toe_capability(struct vi_info *, bool); static void t4_async_event(void *, int); #endif #ifdef KERN_TLS static int ktls_capability(struct adapter *, bool); #endif static int mod_event(module_t, int, void *); static int notify_siblings(device_t, int); static uint64_t vi_get_counter(struct ifnet *, ift_counter); static uint64_t cxgbe_get_counter(struct ifnet *, ift_counter); static void enable_vxlan_rx(struct adapter *); static void reset_adapter(void *, int); struct { uint16_t device; char *desc; } t4_pciids[] = { {0xa000, "Chelsio Terminator 4 FPGA"}, {0x4400, "Chelsio T440-dbg"}, {0x4401, "Chelsio T420-CR"}, {0x4402, "Chelsio T422-CR"}, {0x4403, "Chelsio T440-CR"}, {0x4404, "Chelsio T420-BCH"}, {0x4405, "Chelsio T440-BCH"}, {0x4406, "Chelsio T440-CH"}, 
{0x4407, "Chelsio T420-SO"}, {0x4408, "Chelsio T420-CX"}, {0x4409, "Chelsio T420-BT"}, {0x440a, "Chelsio T404-BT"}, {0x440e, "Chelsio T440-LP-CR"}, }, t5_pciids[] = { {0xb000, "Chelsio Terminator 5 FPGA"}, {0x5400, "Chelsio T580-dbg"}, {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */ {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */ {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */ {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */ {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */ {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */ {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */ {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */ {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */ {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */ {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */ {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */ {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */ {0x5418, "Chelsio T540-BT"}, /* 4 x 10GBaseT */ {0x5419, "Chelsio T540-LP-BT"}, /* 4 x 10GBaseT */ {0x541a, "Chelsio T540-SO-BT"}, /* 4 x 10GBaseT, nomem */ {0x541b, "Chelsio T540-SO-CR"}, /* 4 x 10G, nomem */ /* Custom */ {0x5483, "Custom T540-CR"}, {0x5484, "Custom T540-BT"}, }, t6_pciids[] = { {0xc006, "Chelsio Terminator 6 FPGA"}, /* T6 PE10K6 FPGA (PF0) */ {0x6400, "Chelsio T6-DBG-25"}, /* 2 x 10/25G, debug */ {0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */ {0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */ {0x6403, "Chelsio T6425-CR"}, /* 4 x 10/25G */ {0x6404, "Chelsio T6425-SO-CR"}, /* 4 x 10/25G, nomem */ {0x6405, "Chelsio T6225-OCP-SO"}, /* 2 x 10/25G, nomem */ {0x6406, "Chelsio T62100-OCP-SO"}, /* 2 x 40/50/100G, nomem */ {0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */ {0x6408, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */ {0x6409, "Chelsio T6210-BT"}, /* 2 x 10GBASE-T */ {0x640d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */ {0x6410, "Chelsio T6-DBG-100"}, /* 2 x 40/50/100G, debug */ {0x6411, "Chelsio T6225-LL-CR"}, /* 2 x 10/25G */ {0x6414, "Chelsio T61100-OCP-SO"}, /* 1 x 40/50/100G, nomem */ {0x6415, "Chelsio T6201-BT"}, /* 2 x 1000BASE-T */ /* Custom */ {0x6480, "Custom T6225-CR"}, {0x6481, "Custom T62100-CR"}, {0x6482, "Custom T6225-CR"}, {0x6483, "Custom T62100-CR"}, {0x6484, "Custom T64100-CR"}, {0x6485, "Custom T6240-SO"}, {0x6486, "Custom T6225-SO-CR"}, {0x6487, "Custom T6225-CR"}, }; #ifdef TCP_OFFLOAD /* * service_iq_fl() has an iq and needs the fl. Offset of fl from the iq should * be exactly the same for both rxq and ofld_rxq. 
*/ CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq)); CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl)); #endif CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE); static int t4_probe(device_t dev) { int i; uint16_t v = pci_get_vendor(dev); uint16_t d = pci_get_device(dev); uint8_t f = pci_get_function(dev); if (v != PCI_VENDOR_ID_CHELSIO) return (ENXIO); /* Attach only to PF0 of the FPGA */ if (d == 0xa000 && f != 0) return (ENXIO); for (i = 0; i < nitems(t4_pciids); i++) { if (d == t4_pciids[i].device) { device_set_desc(dev, t4_pciids[i].desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int t5_probe(device_t dev) { int i; uint16_t v = pci_get_vendor(dev); uint16_t d = pci_get_device(dev); uint8_t f = pci_get_function(dev); if (v != PCI_VENDOR_ID_CHELSIO) return (ENXIO); /* Attach only to PF0 of the FPGA */ if (d == 0xb000 && f != 0) return (ENXIO); for (i = 0; i < nitems(t5_pciids); i++) { if (d == t5_pciids[i].device) { device_set_desc(dev, t5_pciids[i].desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int t6_probe(device_t dev) { int i; uint16_t v = pci_get_vendor(dev); uint16_t d = pci_get_device(dev); if (v != PCI_VENDOR_ID_CHELSIO) return (ENXIO); for (i = 0; i < nitems(t6_pciids); i++) { if (d == t6_pciids[i].device) { device_set_desc(dev, t6_pciids[i].desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static void t5_attribute_workaround(device_t dev) { device_t root_port; uint32_t v; /* * The T5 chips do not properly echo the No Snoop and Relaxed * Ordering attributes when replying to a TLP from a Root * Port. As a workaround, find the parent Root Port and * disable No Snoop and Relaxed Ordering. Note that this * affects all devices under this root port. 
*/ root_port = pci_find_pcie_root_port(dev); if (root_port == NULL) { device_printf(dev, "Unable to find parent root port\n"); return; } v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL, PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2); if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) != 0) device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n", device_get_nameunit(root_port)); } static const struct devnames devnames[] = { { .nexus_name = "t4nex", .ifnet_name = "cxgbe", .vi_ifnet_name = "vcxgbe", .pf03_drv_name = "t4iov", .vf_nexus_name = "t4vf", .vf_ifnet_name = "cxgbev" }, { .nexus_name = "t5nex", .ifnet_name = "cxl", .vi_ifnet_name = "vcxl", .pf03_drv_name = "t5iov", .vf_nexus_name = "t5vf", .vf_ifnet_name = "cxlv" }, { .nexus_name = "t6nex", .ifnet_name = "cc", .vi_ifnet_name = "vcc", .pf03_drv_name = "t6iov", .vf_nexus_name = "t6vf", .vf_ifnet_name = "ccv" } }; void t4_init_devnames(struct adapter *sc) { int id; id = chip_id(sc); if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames)) sc->names = &devnames[id - CHELSIO_T4]; else { device_printf(sc->dev, "chip id %d is not supported.\n", id); sc->names = NULL; } } static int t4_ifnet_unit(struct adapter *sc, struct port_info *pi) { const char *parent, *name; long value; int line, unit; line = 0; parent = device_get_nameunit(sc->dev); name = sc->names->ifnet_name; while (resource_find_dev(&line, name, &unit, "at", parent) == 0) { if (resource_long_value(name, unit, "port", &value) == 0 && value == pi->port_id) return (unit); } return (-1); } static int t4_attach(device_t dev) { struct adapter *sc; int rc = 0, i, j, rqidx, tqidx, nports; struct make_dev_args mda; struct intrs_and_queues iaq; struct sge *s; uint32_t *buf; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) int ofld_tqidx; #endif #ifdef TCP_OFFLOAD int ofld_rqidx; #endif #ifdef DEV_NETMAP int nm_rqidx, nm_tqidx; #endif int num_vis; sc = device_get_softc(dev); sc->dev = dev; TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags); if ((pci_get_device(dev) & 0xff00) == 0x5400) t5_attribute_workaround(dev); pci_enable_busmaster(dev); if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) { uint32_t v; pci_set_max_read_req(dev, 4096); v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2); sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5); if (pcie_relaxed_ordering == 0 && (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) { v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE; pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2); } else if (pcie_relaxed_ordering == 1 && (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) { v |= PCIEM_CTL_RELAXED_ORD_ENABLE; pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2); } } sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS); sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL); sc->traceq = -1; mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF); snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer", device_get_nameunit(dev)); snprintf(sc->lockname, sizeof(sc->lockname), "%s", device_get_nameunit(dev)); mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF); t4_add_adapter(sc); mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF); TAILQ_INIT(&sc->sfl); callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0); mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF); sc->policy = NULL; rw_init(&sc->policy_lock, "connection offload policy"); callout_init(&sc->ktls_tick, 1); #ifdef TCP_OFFLOAD TASK_INIT(&sc->async_event_task, 0, t4_async_event, sc); #endif refcount_init(&sc->vxlan_refcount, 0); TASK_INIT(&sc->reset_task, 0, 
reset_adapter, sc); sc->ctrlq_oid = SYSCTL_ADD_NODE(device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "ctrlq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "control queues"); sc->fwq_oid = SYSCTL_ADD_NODE(device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "fwq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "firmware event queue"); rc = t4_map_bars_0_and_4(sc); if (rc != 0) goto done; /* error message displayed already */ memset(sc->chan_map, 0xff, sizeof(sc->chan_map)); /* Prepare the adapter for operation. */ buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK); rc = -t4_prep_adapter(sc, buf); free(buf, M_CXGBE); if (rc != 0) { device_printf(dev, "failed to prepare adapter: %d.\n", rc); goto done; } /* * This is the real PF# to which we're attaching. Works from within PCI * passthrough environments too, where pci_get_function() could return a * different PF# depending on the passthrough configuration. We need to * use the real PF# in all our communication with the firmware. */ j = t4_read_reg(sc, A_PL_WHOAMI); sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j); sc->mbox = sc->pf; t4_init_devnames(sc); if (sc->names == NULL) { rc = ENOTSUP; goto done; /* error message displayed already */ } /* * Do this really early, with the memory windows set up even before the * character device. The userland tool's register i/o and mem read * will work even in "recovery mode". */ setup_memwin(sc); if (t4_init_devlog_params(sc, 0) == 0) fixup_devlog_params(sc); make_dev_args_init(&mda); mda.mda_devsw = &t4_cdevsw; mda.mda_uid = UID_ROOT; mda.mda_gid = GID_WHEEL; mda.mda_mode = 0600; mda.mda_si_drv1 = sc; rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev)); if (rc != 0) device_printf(dev, "failed to create nexus char device: %d.\n", rc); /* Go no further if recovery mode has been requested. */ if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) { device_printf(dev, "recovery mode.\n"); goto done; } #if defined(__i386__) if ((cpu_feature & CPUID_CX8) == 0) { device_printf(dev, "64 bit atomics not available.\n"); rc = ENOTSUP; goto done; } #endif /* Contact the firmware and try to become the master driver. */ rc = contact_firmware(sc); if (rc != 0) goto done; /* error message displayed already */ MPASS(sc->flags & FW_OK); rc = get_params__pre_init(sc); if (rc != 0) goto done; /* error message displayed already */ if (sc->flags & MASTER_PF) { rc = partition_resources(sc); if (rc != 0) goto done; /* error message displayed already */ t4_intr_clear(sc); } rc = get_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = set_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = t4_map_bar_2(sc); if (rc != 0) goto done; /* error message displayed already */ rc = t4_create_dma_tag(sc); if (rc != 0) goto done; /* error message displayed already */ /* * First pass over all the ports - allocate VIs and initialize some * basic parameters like mac address, port type, etc. */ for_each_port(sc, i) { struct port_info *pi; pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK); sc->port[i] = pi; /* These must be set before t4_port_init */ pi->adapter = sc; pi->port_id = i; /* * XXX: vi[0] is special so we can't delay this allocation until * pi->nvi's final value is known. */ pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE, M_ZERO | M_WAITOK); /* * Allocate the "main" VI and initialize parameters * like mac addr. 
*/ rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i); if (rc != 0) { device_printf(dev, "unable to initialize port %d: %d\n", i, rc); free(pi->vi, M_CXGBE); free(pi, M_CXGBE); sc->port[i] = NULL; goto done; } snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d", device_get_nameunit(dev), i); mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF); sc->chan_map[pi->tx_chan] = i; /* * The MPS counter for FCS errors doesn't work correctly on the * T6 so we use the MAC counter here. Which MAC is in use * depends on the link settings which will be known when the * link comes up. */ if (is_t6(sc)) { pi->fcs_reg = -1; } else if (is_t4(sc)) { pi->fcs_reg = PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L); } else { pi->fcs_reg = T5_PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L); } pi->fcs_base = 0; /* All VIs on this port share this media. */ ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change, cxgbe_media_status); PORT_LOCK(pi); init_link_config(pi); fixup_link_config(pi); build_medialist(pi); if (fixed_ifmedia(pi)) pi->flags |= FIXED_IFMEDIA; PORT_UNLOCK(pi); pi->dev = device_add_child(dev, sc->names->ifnet_name, t4_ifnet_unit(sc, pi)); if (pi->dev == NULL) { device_printf(dev, "failed to add device for port %d.\n", i); rc = ENXIO; goto done; } pi->vi[0].dev = pi->dev; device_set_softc(pi->dev, pi); } /* * Interrupt type, # of interrupts, # of rx/tx queues, etc. */ nports = sc->params.nports; rc = cfg_itype_and_nqueues(sc, &iaq); if (rc != 0) goto done; /* error message displayed already */ num_vis = iaq.num_vis; sc->intr_type = iaq.intr_type; sc->intr_count = iaq.nirq; s = &sc->sge; s->nrxq = nports * iaq.nrxq; s->ntxq = nports * iaq.ntxq; if (num_vis > 1) { s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi; s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi; } s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */ s->neq += nports; /* ctrl queues: 1 per port */ s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ #if defined(TCP_OFFLOAD) || defined(RATELIMIT) if (is_offload(sc) || is_ethoffload(sc)) { s->nofldtxq = nports * iaq.nofldtxq; if (num_vis > 1) s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi; s->neq += s->nofldtxq; s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_ofld_txq), M_CXGBE, M_ZERO | M_WAITOK); } #endif #ifdef TCP_OFFLOAD if (is_offload(sc)) { s->nofldrxq = nports * iaq.nofldrxq; if (num_vis > 1) s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi; s->neq += s->nofldrxq; /* free list */ s->niq += s->nofldrxq; s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq), M_CXGBE, M_ZERO | M_WAITOK); } #endif #ifdef DEV_NETMAP s->nnmrxq = 0; s->nnmtxq = 0; if (t4_native_netmap & NN_MAIN_VI) { s->nnmrxq += nports * iaq.nnmrxq; s->nnmtxq += nports * iaq.nnmtxq; } if (num_vis > 1 && t4_native_netmap & NN_EXTRA_VI) { s->nnmrxq += nports * (num_vis - 1) * iaq.nnmrxq_vi; s->nnmtxq += nports * (num_vis - 1) * iaq.nnmtxq_vi; } s->neq += s->nnmtxq + s->nnmrxq; s->niq += s->nnmrxq; s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq), M_CXGBE, M_ZERO | M_WAITOK); s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq), M_CXGBE, M_ZERO | M_WAITOK); #endif MPASS(s->niq <= s->iqmap_sz); MPASS(s->neq <= s->eqmap_sz); s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE, M_ZERO | M_WAITOK); s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE, M_ZERO | M_WAITOK); s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE, M_ZERO | M_WAITOK); s->iqmap = malloc(s->iqmap_sz * sizeof(struct sge_iq *), M_CXGBE, M_ZERO | M_WAITOK); 
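/*
 * Worked example of the queue accounting above (illustrative numbers
 * only): a 2-port adapter with one VI per port, iaq.ntxq = 16,
 * iaq.nrxq = 8, and no TOE/netmap queues gives:
 *
 *   s->ntxq = 2 * 16              = 32
 *   s->nrxq = 2 * 8               = 16
 *   s->neq  = 32 + 16 + 2 (ctrlq) = 50  eqs (each rxq free list is an eq)
 *   s->niq  = 16 + 1 (fwq)        = 17  iqs
 *
 * Offload and netmap queues, when compiled in, are added on top in the
 * same per-port, per-VI fashion.
 */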
s->eqmap = malloc(s->eqmap_sz * sizeof(struct sge_eq *), M_CXGBE, M_ZERO | M_WAITOK); sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE, M_ZERO | M_WAITOK); t4_init_l2t(sc, M_WAITOK); t4_init_smt(sc, M_WAITOK); t4_init_tx_sched(sc); t4_init_atid_table(sc); #ifdef RATELIMIT t4_init_etid_table(sc); #endif #ifdef INET6 t4_init_clip_table(sc); #endif if (sc->vres.key.size != 0) sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start, sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK); /* * Second pass over the ports. This time we know the number of rx and * tx queues that each port should get. */ rqidx = tqidx = 0; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) ofld_tqidx = 0; #endif #ifdef TCP_OFFLOAD ofld_rqidx = 0; #endif #ifdef DEV_NETMAP nm_rqidx = nm_tqidx = 0; #endif for_each_port(sc, i) { struct port_info *pi = sc->port[i]; struct vi_info *vi; if (pi == NULL) continue; pi->nvi = num_vis; for_each_vi(pi, j, vi) { vi->pi = pi; vi->adapter = sc; vi->first_intr = -1; vi->qsize_rxq = t4_qsize_rxq; vi->qsize_txq = t4_qsize_txq; vi->first_rxq = rqidx; vi->first_txq = tqidx; vi->tmr_idx = t4_tmr_idx; vi->pktc_idx = t4_pktc_idx; vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi; vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi; rqidx += vi->nrxq; tqidx += vi->ntxq; if (j == 0 && vi->ntxq > 1) vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0; else vi->rsrv_noflowq = 0; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) vi->first_ofld_txq = ofld_tqidx; vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi; ofld_tqidx += vi->nofldtxq; #endif #ifdef TCP_OFFLOAD vi->ofld_tmr_idx = t4_tmr_idx_ofld; vi->ofld_pktc_idx = t4_pktc_idx_ofld; vi->first_ofld_rxq = ofld_rqidx; vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi; ofld_rqidx += vi->nofldrxq; #endif #ifdef DEV_NETMAP vi->first_nm_rxq = nm_rqidx; vi->first_nm_txq = nm_tqidx; if (j == 0) { vi->nnmrxq = iaq.nnmrxq; vi->nnmtxq = iaq.nnmtxq; } else { vi->nnmrxq = iaq.nnmrxq_vi; vi->nnmtxq = iaq.nnmtxq_vi; } nm_rqidx += vi->nnmrxq; nm_tqidx += vi->nnmtxq; #endif } } rc = t4_setup_intr_handlers(sc); if (rc != 0) { device_printf(dev, "failed to setup interrupt handlers: %d\n", rc); goto done; } rc = bus_generic_probe(dev); if (rc != 0) { device_printf(dev, "failed to probe child drivers: %d\n", rc); goto done; } /* * Ensure thread-safe mailbox access (in debug builds). * * So far this was the only thread accessing the mailbox but various * ifnets and sysctls are about to be created and their handlers/ioctls * will access the mailbox from different threads. */ sc->flags |= CHK_MBOX_ACCESS; rc = bus_generic_attach(dev); if (rc != 0) { device_printf(dev, "failed to attach all child ports: %d\n", rc); goto done; } device_printf(dev, "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n", sc->params.pci.speed, sc->params.pci.width, sc->params.nports, sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" : (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq); t4_set_desc(sc); notify_siblings(dev, 0); done: if (rc != 0 && sc->cdev) { /* cdev was created and so cxgbetool works; recover that way. 
*/ device_printf(dev, "error during attach, adapter is now in recovery mode.\n"); rc = 0; } if (rc != 0) t4_detach_common(dev); else t4_sysctls(sc); return (rc); } static int -t4_child_location_str(device_t bus, device_t dev, char *buf, size_t buflen) +t4_child_location(device_t bus, device_t dev, struct sbuf *sb) { struct adapter *sc; struct port_info *pi; int i; sc = device_get_softc(bus); - buf[0] = '\0'; for_each_port(sc, i) { pi = sc->port[i]; if (pi != NULL && pi->dev == dev) { - snprintf(buf, buflen, "port=%d", pi->port_id); + sbuf_printf(sb, "port=%d", pi->port_id); break; } } return (0); } static int t4_ready(device_t dev) { struct adapter *sc; sc = device_get_softc(dev); if (sc->flags & FW_OK) return (0); return (ENXIO); } static int t4_read_port_device(device_t dev, int port, device_t *child) { struct adapter *sc; struct port_info *pi; sc = device_get_softc(dev); if (port < 0 || port >= MAX_NPORTS) return (EINVAL); pi = sc->port[port]; if (pi == NULL || pi->dev == NULL) return (ENXIO); *child = pi->dev; return (0); } static int notify_siblings(device_t dev, int detaching) { device_t sibling; int error, i; error = 0; for (i = 0; i < PCI_FUNCMAX; i++) { if (i == pci_get_function(dev)) continue; sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), i); if (sibling == NULL || !device_is_attached(sibling)) continue; if (detaching) error = T4_DETACH_CHILD(sibling); else (void)T4_ATTACH_CHILD(sibling); if (error) break; } return (error); } /* * Idempotent */ static int t4_detach(device_t dev) { struct adapter *sc; int rc; sc = device_get_softc(dev); rc = notify_siblings(dev, 1); if (rc) { device_printf(dev, "failed to detach sibling devices: %d\n", rc); return (rc); } return (t4_detach_common(dev)); } int t4_detach_common(device_t dev) { struct adapter *sc; struct port_info *pi; int i, rc; sc = device_get_softc(dev); if (sc->cdev) { destroy_dev(sc->cdev); sc->cdev = NULL; } sx_xlock(&t4_list_lock); SLIST_REMOVE(&t4_list, sc, adapter, link); sx_xunlock(&t4_list_lock); sc->flags &= ~CHK_MBOX_ACCESS; if (sc->flags & FULL_INIT_DONE) { if (!(sc->flags & IS_VF)) t4_intr_disable(sc); } if (device_is_attached(dev)) { rc = bus_generic_detach(dev); if (rc) { device_printf(dev, "failed to detach child devices: %d\n", rc); return (rc); } } #ifdef TCP_OFFLOAD taskqueue_drain(taskqueue_thread, &sc->async_event_task); #endif for (i = 0; i < sc->intr_count; i++) t4_free_irq(sc, &sc->irq[i]); if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) t4_free_tx_sched(sc); for (i = 0; i < MAX_NPORTS; i++) { pi = sc->port[i]; if (pi) { t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid); if (pi->dev) device_delete_child(dev, pi->dev); mtx_destroy(&pi->pi_lock); free(pi->vi, M_CXGBE); free(pi, M_CXGBE); } } device_delete_children(dev); adapter_full_uninit(sc); if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) t4_fw_bye(sc, sc->mbox); if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX) pci_release_msi(dev); if (sc->regs_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid, sc->regs_res); if (sc->udbs_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid, sc->udbs_res); if (sc->msix_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid, sc->msix_res); if (sc->l2t) t4_free_l2t(sc->l2t); if (sc->smt) t4_free_smt(sc->smt); t4_free_atid_table(sc); #ifdef RATELIMIT t4_free_etid_table(sc); #endif if (sc->key_map) vmem_destroy(sc->key_map); #ifdef INET6 t4_destroy_clip_table(sc); #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) free(sc->sge.ofld_txq, M_CXGBE); #endif #ifdef 
TCP_OFFLOAD free(sc->sge.ofld_rxq, M_CXGBE); #endif #ifdef DEV_NETMAP free(sc->sge.nm_rxq, M_CXGBE); free(sc->sge.nm_txq, M_CXGBE); #endif free(sc->irq, M_CXGBE); free(sc->sge.rxq, M_CXGBE); free(sc->sge.txq, M_CXGBE); free(sc->sge.ctrlq, M_CXGBE); free(sc->sge.iqmap, M_CXGBE); free(sc->sge.eqmap, M_CXGBE); free(sc->tids.ftid_tab, M_CXGBE); free(sc->tids.hpftid_tab, M_CXGBE); free_hftid_hash(&sc->tids); free(sc->tids.tid_tab, M_CXGBE); free(sc->tt.tls_rx_ports, M_CXGBE); t4_destroy_dma_tag(sc); callout_drain(&sc->ktls_tick); callout_drain(&sc->sfl_callout); if (mtx_initialized(&sc->tids.ftid_lock)) { mtx_destroy(&sc->tids.ftid_lock); cv_destroy(&sc->tids.ftid_cv); } if (mtx_initialized(&sc->tids.atid_lock)) mtx_destroy(&sc->tids.atid_lock); if (mtx_initialized(&sc->ifp_lock)) mtx_destroy(&sc->ifp_lock); if (rw_initialized(&sc->policy_lock)) { rw_destroy(&sc->policy_lock); #ifdef TCP_OFFLOAD if (sc->policy != NULL) free_offload_policy(sc->policy); #endif } for (i = 0; i < NUM_MEMWIN; i++) { struct memwin *mw = &sc->memwin[i]; if (rw_initialized(&mw->mw_lock)) rw_destroy(&mw->mw_lock); } mtx_destroy(&sc->sfl_lock); mtx_destroy(&sc->reg_lock); mtx_destroy(&sc->sc_lock); bzero(sc, sizeof(*sc)); return (0); } static inline bool ok_to_reset(struct adapter *sc) { struct tid_info *t = &sc->tids; struct port_info *pi; struct vi_info *vi; int i, j; const int caps = IFCAP_TOE | IFCAP_TXTLS | IFCAP_NETMAP | IFCAP_TXRTLMT; ASSERT_SYNCHRONIZED_OP(sc); MPASS(!(sc->flags & IS_VF)); for_each_port(sc, i) { pi = sc->port[i]; for_each_vi(pi, j, vi) { if (vi->ifp->if_capenable & caps) return (false); } } if (atomic_load_int(&t->tids_in_use) > 0) return (false); if (atomic_load_int(&t->stids_in_use) > 0) return (false); if (atomic_load_int(&t->atids_in_use) > 0) return (false); if (atomic_load_int(&t->ftids_in_use) > 0) return (false); if (atomic_load_int(&t->hpftids_in_use) > 0) return (false); if (atomic_load_int(&t->etids_in_use) > 0) return (false); return (true); } static int t4_suspend(device_t dev) { struct adapter *sc = device_get_softc(dev); struct port_info *pi; struct vi_info *vi; struct ifnet *ifp; struct sge_rxq *rxq; struct sge_txq *txq; struct sge_wrq *wrq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) struct sge_ofld_txq *ofld_txq; #endif int rc, i, j, k; CH_ALERT(sc, "suspend requested\n"); rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4sus"); if (rc != 0) return (ENXIO); /* XXX: Can the kernel call suspend repeatedly without resume? */ MPASS(!hw_off_limits(sc)); if (!ok_to_reset(sc)) { /* XXX: should list what resource is preventing suspend. */ CH_ERR(sc, "not safe to suspend.\n"); rc = EBUSY; goto done; } /* No more DMA or interrupts. */ t4_shutdown_adapter(sc); /* Quiesce all activity. */ for_each_port(sc, i) { pi = sc->port[i]; pi->vxlan_tcam_entry = false; PORT_LOCK(pi); if (pi->up_vis > 0) { /* * t4_shutdown_adapter has already shut down all the * PHYs but it also disables interrupts and DMA so there * won't be a link interrupt. So we update the state * manually and inform the kernel. */ pi->link_cfg.link_ok = false; t4_os_link_changed(pi); } PORT_UNLOCK(pi); for_each_vi(pi, j, vi) { vi->xact_addr_filt = -1; if (!(vi->flags & VI_INIT_DONE)) continue; ifp = vi->ifp; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { mtx_lock(&vi->tick_mtx); vi->flags |= VI_SKIP_STATS; callout_stop(&vi->tick); mtx_unlock(&vi->tick_mtx); callout_drain(&vi->tick); } /* * Note that the HW is not available. 
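* Clear the hardware-allocated flags on every tx and rx queue so nothing tries to ring a doorbell or post work to them while the adapter is down.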
*/ for_each_txq(vi, k, txq) { TXQ_LOCK(txq); txq->eq.flags &= ~(EQ_ENABLED | EQ_HW_ALLOCATED); TXQ_UNLOCK(txq); } #if defined(TCP_OFFLOAD) || defined(RATELIMIT) for_each_ofld_txq(vi, k, ofld_txq) { ofld_txq->wrq.eq.flags &= ~EQ_HW_ALLOCATED; } #endif for_each_rxq(vi, k, rxq) { rxq->iq.flags &= ~IQ_HW_ALLOCATED; } #if defined(TCP_OFFLOAD) for_each_ofld_rxq(vi, k, ofld_rxq) { ofld_rxq->iq.flags &= ~IQ_HW_ALLOCATED; } #endif quiesce_vi(vi); } if (sc->flags & FULL_INIT_DONE) { /* Control queue */ wrq = &sc->sge.ctrlq[i]; wrq->eq.flags &= ~EQ_HW_ALLOCATED; quiesce_wrq(wrq); } } if (sc->flags & FULL_INIT_DONE) { /* Firmware event queue */ sc->sge.fwq.flags &= ~IQ_HW_ALLOCATED; quiesce_iq_fl(sc, &sc->sge.fwq, NULL); } /* Mark the adapter totally off limits. */ mtx_lock(&sc->reg_lock); sc->flags |= HW_OFF_LIMITS; sc->flags &= ~(FW_OK | MASTER_PF); sc->reset_thread = NULL; mtx_unlock(&sc->reg_lock); sc->num_resets++; CH_ALERT(sc, "suspend completed.\n"); done: end_synchronized_op(sc, 0); return (rc); } struct adapter_pre_reset_state { u_int flags; uint16_t nbmcaps; uint16_t linkcaps; uint16_t switchcaps; uint16_t niccaps; uint16_t toecaps; uint16_t rdmacaps; uint16_t cryptocaps; uint16_t iscsicaps; uint16_t fcoecaps; u_int cfcsum; char cfg_file[32]; struct adapter_params params; struct t4_virt_res vres; struct tid_info tids; struct sge sge; int rawf_base; int nrawf; }; static void save_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o) { ASSERT_SYNCHRONIZED_OP(sc); o->flags = sc->flags; o->nbmcaps = sc->nbmcaps; o->linkcaps = sc->linkcaps; o->switchcaps = sc->switchcaps; o->niccaps = sc->niccaps; o->toecaps = sc->toecaps; o->rdmacaps = sc->rdmacaps; o->cryptocaps = sc->cryptocaps; o->iscsicaps = sc->iscsicaps; o->fcoecaps = sc->fcoecaps; o->cfcsum = sc->cfcsum; MPASS(sizeof(o->cfg_file) == sizeof(sc->cfg_file)); memcpy(o->cfg_file, sc->cfg_file, sizeof(o->cfg_file)); o->params = sc->params; o->vres = sc->vres; o->tids = sc->tids; o->sge = sc->sge; o->rawf_base = sc->rawf_base; o->nrawf = sc->nrawf; } static int compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o) { int rc = 0; ASSERT_SYNCHRONIZED_OP(sc); /* Capabilities */ #define COMPARE_CAPS(c) do { \ if (o->c##caps != sc->c##caps) { \ CH_ERR(sc, "%scaps 0x%04x -> 0x%04x.\n", #c, o->c##caps, \ sc->c##caps); \ rc = EINVAL; \ } \ } while (0) COMPARE_CAPS(nbm); COMPARE_CAPS(link); COMPARE_CAPS(switch); COMPARE_CAPS(nic); COMPARE_CAPS(toe); COMPARE_CAPS(rdma); COMPARE_CAPS(crypto); COMPARE_CAPS(iscsi); COMPARE_CAPS(fcoe); #undef COMPARE_CAPS /* Firmware config file */ if (o->cfcsum != sc->cfcsum) { CH_ERR(sc, "config file %s (0x%x) -> %s (0x%x)\n", o->cfg_file, o->cfcsum, sc->cfg_file, sc->cfcsum); rc = EINVAL; } #define COMPARE_PARAM(p, name) do { \ if (o->p != sc->p) { \ CH_ERR(sc, #name " %d -> %d\n", o->p, sc->p); \ rc = EINVAL; \ } \ } while (0) COMPARE_PARAM(sge.iq_start, iq_start); COMPARE_PARAM(sge.eq_start, eq_start); COMPARE_PARAM(tids.ftid_base, ftid_base); COMPARE_PARAM(tids.ftid_end, ftid_end); COMPARE_PARAM(tids.nftids, nftids); COMPARE_PARAM(vres.l2t.start, l2t_start); COMPARE_PARAM(vres.l2t.size, l2t_size); COMPARE_PARAM(sge.iqmap_sz, iqmap_sz); COMPARE_PARAM(sge.eqmap_sz, eqmap_sz); COMPARE_PARAM(tids.tid_base, tid_base); COMPARE_PARAM(tids.hpftid_base, hpftid_base); COMPARE_PARAM(tids.hpftid_end, hpftid_end); COMPARE_PARAM(tids.nhpftids, nhpftids); COMPARE_PARAM(rawf_base, rawf_base); COMPARE_PARAM(nrawf, nrawf); COMPARE_PARAM(params.mps_bg_map, mps_bg_map); 
COMPARE_PARAM(params.filter2_wr_support, filter2_wr_support); COMPARE_PARAM(params.ulptx_memwrite_dsgl, ulptx_memwrite_dsgl); COMPARE_PARAM(params.fr_nsmr_tpte_wr_support, fr_nsmr_tpte_wr_support); COMPARE_PARAM(params.max_pkts_per_eth_tx_pkts_wr, max_pkts_per_eth_tx_pkts_wr); COMPARE_PARAM(tids.ntids, ntids); COMPARE_PARAM(tids.etid_base, etid_base); COMPARE_PARAM(tids.etid_end, etid_end); COMPARE_PARAM(tids.netids, netids); COMPARE_PARAM(params.eo_wr_cred, eo_wr_cred); COMPARE_PARAM(params.ethoffload, ethoffload); COMPARE_PARAM(tids.natids, natids); COMPARE_PARAM(tids.stid_base, stid_base); COMPARE_PARAM(vres.ddp.start, ddp_start); COMPARE_PARAM(vres.ddp.size, ddp_size); COMPARE_PARAM(params.ofldq_wr_cred, ofldq_wr_cred); COMPARE_PARAM(vres.stag.start, stag_start); COMPARE_PARAM(vres.stag.size, stag_size); COMPARE_PARAM(vres.rq.start, rq_start); COMPARE_PARAM(vres.rq.size, rq_size); COMPARE_PARAM(vres.pbl.start, pbl_start); COMPARE_PARAM(vres.pbl.size, pbl_size); COMPARE_PARAM(vres.qp.start, qp_start); COMPARE_PARAM(vres.qp.size, qp_size); COMPARE_PARAM(vres.cq.start, cq_start); COMPARE_PARAM(vres.cq.size, cq_size); COMPARE_PARAM(vres.ocq.start, ocq_start); COMPARE_PARAM(vres.ocq.size, ocq_size); COMPARE_PARAM(vres.srq.start, srq_start); COMPARE_PARAM(vres.srq.size, srq_size); COMPARE_PARAM(params.max_ordird_qp, max_ordird_qp); COMPARE_PARAM(params.max_ird_adapter, max_ird_adapter); COMPARE_PARAM(vres.iscsi.start, iscsi_start); COMPARE_PARAM(vres.iscsi.size, iscsi_size); COMPARE_PARAM(vres.key.start, key_start); COMPARE_PARAM(vres.key.size, key_size); #undef COMPARE_PARAM return (rc); } static int t4_resume(device_t dev) { struct adapter *sc = device_get_softc(dev); struct adapter_pre_reset_state *old_state = NULL; struct port_info *pi; struct vi_info *vi; struct ifnet *ifp; struct sge_txq *txq; int rc, i, j, k; CH_ALERT(sc, "resume requested.\n"); rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4res"); if (rc != 0) return (ENXIO); MPASS(hw_off_limits(sc)); MPASS((sc->flags & FW_OK) == 0); MPASS((sc->flags & MASTER_PF) == 0); MPASS(sc->reset_thread == NULL); sc->reset_thread = curthread; /* Register access is expected to work by the time we're here. */ if (t4_read_reg(sc, A_PL_WHOAMI) == 0xffffffff) { CH_ERR(sc, "%s: can't read device registers\n", __func__); rc = ENXIO; goto done; } /* Restore memory window. */ setup_memwin(sc); /* Go no further if recovery mode has been requested. */ if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) { CH_ALERT(sc, "recovery mode on resume.\n"); rc = 0; mtx_lock(&sc->reg_lock); sc->flags &= ~HW_OFF_LIMITS; mtx_unlock(&sc->reg_lock); goto done; } old_state = malloc(sizeof(*old_state), M_CXGBE, M_ZERO | M_WAITOK); save_caps_and_params(sc, old_state); /* Reestablish contact with firmware and become the primary PF. 
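* The steps below mirror the first-time attach sequence; the state saved above is compared afterwards to catch any change in capabilities or resources across the reset.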
*/ rc = contact_firmware(sc); if (rc != 0) goto done; /* error message displayed already */ MPASS(sc->flags & FW_OK); if (sc->flags & MASTER_PF) { rc = partition_resources(sc); if (rc != 0) goto done; /* error message displayed already */ t4_intr_clear(sc); } rc = get_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = set_params__post_init(sc); if (rc != 0) goto done; /* error message displayed already */ rc = compare_caps_and_params(sc, old_state); if (rc != 0) goto done; /* error message displayed already */ for_each_port(sc, i) { pi = sc->port[i]; MPASS(pi != NULL); MPASS(pi->vi != NULL); MPASS(pi->vi[0].dev == pi->dev); rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i); if (rc != 0) { CH_ERR(sc, "failed to re-initialize port %d: %d\n", i, rc); goto done; } MPASS(sc->chan_map[pi->tx_chan] == i); PORT_LOCK(pi); fixup_link_config(pi); build_medialist(pi); PORT_UNLOCK(pi); for_each_vi(pi, j, vi) { if (IS_MAIN_VI(vi)) continue; rc = alloc_extra_vi(sc, pi, vi); if (rc != 0) { CH_ERR(vi, "failed to re-allocate extra VI: %d\n", rc); goto done; } } } /* * Interrupts and queues are about to be enabled and other threads will * want to access the hardware too. It is safe to do so. Note that * this thread is still in the middle of a synchronized_op. */ mtx_lock(&sc->reg_lock); sc->flags &= ~HW_OFF_LIMITS; mtx_unlock(&sc->reg_lock); if (sc->flags & FULL_INIT_DONE) { rc = adapter_full_init(sc); if (rc != 0) { CH_ERR(sc, "failed to re-initialize adapter: %d\n", rc); goto done; } if (sc->vxlan_refcount > 0) enable_vxlan_rx(sc); for_each_port(sc, i) { pi = sc->port[i]; for_each_vi(pi, j, vi) { if (!(vi->flags & VI_INIT_DONE)) continue; rc = vi_full_init(vi); if (rc != 0) { CH_ERR(vi, "failed to re-initialize " "interface: %d\n", rc); goto done; } ifp = vi->ifp; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) continue; /* * Note that we do not setup multicast addresses * in the first pass. This ensures that the * unicast DMACs for all VIs on all ports get an * MPS TCAM entry. */ rc = update_mac_settings(ifp, XGMAC_ALL & ~XGMAC_MCADDRS); if (rc != 0) { CH_ERR(vi, "failed to re-configure MAC: %d\n", rc); goto done; } rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); if (rc != 0) { CH_ERR(vi, "failed to re-enable VI: %d\n", rc); goto done; } for_each_txq(vi, k, txq) { TXQ_LOCK(txq); txq->eq.flags |= EQ_ENABLED; TXQ_UNLOCK(txq); } mtx_lock(&vi->tick_mtx); vi->flags &= ~VI_SKIP_STATS; callout_schedule(&vi->tick, hz); mtx_unlock(&vi->tick_mtx); } PORT_LOCK(pi); if (pi->up_vis > 0) { t4_update_port_info(pi); fixup_link_config(pi); build_medialist(pi); apply_link_config(pi); if (pi->link_cfg.link_ok) t4_os_link_changed(pi); } PORT_UNLOCK(pi); } /* Now reprogram the L2 multicast addresses. 
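* This is the deferred second pass: the unicast DMACs were programmed first so that every VI was guaranteed an MPS TCAM entry before any multicast addresses could consume them.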
*/ for_each_port(sc, i) { pi = sc->port[i]; for_each_vi(pi, j, vi) { if (!(vi->flags & VI_INIT_DONE)) continue; ifp = vi->ifp; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) continue; rc = update_mac_settings(ifp, XGMAC_MCADDRS); if (rc != 0) { CH_ERR(vi, "failed to re-configure MCAST MACs: %d\n", rc); rc = 0; /* carry on */ } } } } done: if (rc == 0) { sc->incarnation++; CH_ALERT(sc, "resume completed.\n"); } end_synchronized_op(sc, 0); free(old_state, M_CXGBE); return (rc); } static int t4_reset_prepare(device_t dev, device_t child) { struct adapter *sc = device_get_softc(dev); CH_ALERT(sc, "reset_prepare.\n"); return (0); } static int t4_reset_post(device_t dev, device_t child) { struct adapter *sc = device_get_softc(dev); CH_ALERT(sc, "reset_post.\n"); return (0); } static void reset_adapter(void *arg, int pending) { struct adapter *sc = arg; int rc; CH_ALERT(sc, "reset requested.\n"); rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rst1"); if (rc != 0) return; if (hw_off_limits(sc)) { CH_ERR(sc, "adapter is suspended, use resume (not reset).\n"); rc = ENXIO; goto done; } if (!ok_to_reset(sc)) { /* XXX: should list what resource is preventing reset. */ CH_ERR(sc, "not safe to reset.\n"); rc = EBUSY; goto done; } done: end_synchronized_op(sc, 0); if (rc != 0) return; /* Error logged already. */ mtx_lock(&Giant); rc = BUS_RESET_CHILD(device_get_parent(sc->dev), sc->dev, 0); mtx_unlock(&Giant); if (rc != 0) CH_ERR(sc, "bus_reset_child failed: %d.\n", rc); else CH_ALERT(sc, "bus_reset_child succeeded.\n"); } static int cxgbe_probe(device_t dev) { char buf[128]; struct port_info *pi = device_get_softc(dev); snprintf(buf, sizeof(buf), "port %d", pi->port_id); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \ IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \ IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS | \ IFCAP_HWRXTSTMP | IFCAP_MEXTPG) #define T4_CAP_ENABLE (T4_CAP) static int cxgbe_vi_attach(device_t dev, struct vi_info *vi) { struct ifnet *ifp; struct sbuf *sb; struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct pfil_head_args pa; struct adapter *sc = vi->adapter; ctx = device_get_sysctl_ctx(vi->dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(vi->dev)); vi->rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rxq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC rx queues"); vi->txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "txq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NIC tx queues"); #ifdef DEV_NETMAP vi->nm_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_rxq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap rx queues"); vi->nm_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "nm_txq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queues"); #endif #ifdef TCP_OFFLOAD vi->ofld_rxq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_rxq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE rx queues"); #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) vi->ofld_txq_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "ofld_txq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE/ETHOFLD tx queues"); #endif vi->xact_addr_filt = -1; mtx_init(&vi->tick_mtx, "vi tick", NULL, MTX_DEF); callout_init_mtx(&vi->tick, &vi->tick_mtx, 0); if (sc->flags & IS_VF || t4_tx_vm_wr != 0) vi->flags |= TX_USES_VM_WR; /* Allocate an ifnet and set it up */ ifp = if_alloc_dev(IFT_ETHER, dev); if (ifp == NULL) { device_printf(dev, "Cannot allocate ifnet\n"); return (ENOMEM); } vi->ifp = ifp; 
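/* Standard ifnet setup: wire up the driver entry points and advertise the capabilities this VI supports. */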
ifp->if_softc = vi; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = cxgbe_init; ifp->if_ioctl = cxgbe_ioctl; ifp->if_transmit = cxgbe_transmit; ifp->if_qflush = cxgbe_qflush; if (vi->pi->nvi > 1 || sc->flags & IS_VF) ifp->if_get_counter = vi_get_counter; else ifp->if_get_counter = cxgbe_get_counter; #if defined(KERN_TLS) || defined(RATELIMIT) ifp->if_snd_tag_alloc = cxgbe_snd_tag_alloc; ifp->if_snd_tag_modify = cxgbe_snd_tag_modify; ifp->if_snd_tag_query = cxgbe_snd_tag_query; ifp->if_snd_tag_free = cxgbe_snd_tag_free; #endif #ifdef RATELIMIT ifp->if_ratelimit_query = cxgbe_ratelimit_query; #endif ifp->if_capabilities = T4_CAP; ifp->if_capenable = T4_CAP_ENABLE; ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO | CSUM_UDP_IPV6 | CSUM_TCP_IPV6; if (chip_id(sc) >= CHELSIO_T6) { ifp->if_capabilities |= IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO; ifp->if_capenable |= IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO; ifp->if_hwassist |= CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_ENCAP_VXLAN; } #ifdef TCP_OFFLOAD if (vi->nofldrxq != 0) ifp->if_capabilities |= IFCAP_TOE; #endif #ifdef RATELIMIT if (is_ethoffload(sc) && vi->nofldtxq != 0) { ifp->if_capabilities |= IFCAP_TXRTLMT; ifp->if_capenable |= IFCAP_TXRTLMT; } #endif ifp->if_hw_tsomax = IP_MAXPACKET; if (vi->flags & TX_USES_VM_WR) ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_VM_TSO; else ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_TSO; #ifdef RATELIMIT if (is_ethoffload(sc) && vi->nofldtxq != 0) ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_EO_TSO; #endif ifp->if_hw_tsomaxsegsize = 65536; #ifdef KERN_TLS if (is_ktls(sc)) { ifp->if_capabilities |= IFCAP_TXTLS; if (sc->flags & KERN_TLS_ON) ifp->if_capenable |= IFCAP_TXTLS; } #endif ether_ifattach(ifp, vi->hw_addr); #ifdef DEV_NETMAP if (vi->nnmrxq != 0) cxgbe_nm_attach(vi); #endif sb = sbuf_new_auto(); sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq); #if defined(TCP_OFFLOAD) || defined(RATELIMIT) switch (ifp->if_capabilities & (IFCAP_TOE | IFCAP_TXRTLMT)) { case IFCAP_TOE: sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq); break; case IFCAP_TOE | IFCAP_TXRTLMT: sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq); break; case IFCAP_TXRTLMT: sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq); break; } #endif #ifdef TCP_OFFLOAD if (ifp->if_capabilities & IFCAP_TOE) sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq); #endif #ifdef DEV_NETMAP if (ifp->if_capabilities & IFCAP_NETMAP) sbuf_printf(sb, "; %d txq, %d rxq (netmap)", vi->nnmtxq, vi->nnmrxq); #endif sbuf_finish(sb); device_printf(dev, "%s\n", sbuf_data(sb)); sbuf_delete(sb); vi_sysctls(vi); pa.pa_version = PFIL_VERSION; pa.pa_flags = PFIL_IN; pa.pa_type = PFIL_TYPE_ETHERNET; pa.pa_headname = ifp->if_xname; vi->pfil = pfil_head_register(&pa); return (0); } static int cxgbe_attach(device_t dev) { struct port_info *pi = device_get_softc(dev); struct adapter *sc = pi->adapter; struct vi_info *vi; int i, rc; rc = cxgbe_vi_attach(dev, &pi->vi[0]); if (rc) return (rc); for_each_vi(pi, i, vi) { if (i == 0) continue; vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1); if (vi->dev == NULL) { device_printf(dev, "failed to add VI %d\n", i); continue; } device_set_softc(vi->dev, vi); } cxgbe_sysctls(pi); bus_generic_attach(dev); return (0); } static void cxgbe_vi_detach(struct vi_info *vi) { struct ifnet *ifp = vi->ifp; if (vi->pfil != NULL) { 
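/* Unhook pfil(9) before the ifnet is torn down. */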
pfil_head_unregister(vi->pfil); vi->pfil = NULL; } ether_ifdetach(ifp); /* Let detach proceed even if these fail. */ #ifdef DEV_NETMAP if (ifp->if_capabilities & IFCAP_NETMAP) cxgbe_nm_detach(vi); #endif cxgbe_uninit_synchronized(vi); callout_drain(&vi->tick); vi_full_uninit(vi); if_free(vi->ifp); vi->ifp = NULL; } static int cxgbe_detach(device_t dev) { struct port_info *pi = device_get_softc(dev); struct adapter *sc = pi->adapter; int rc; /* Detach the extra VIs first. */ rc = bus_generic_detach(dev); if (rc) return (rc); device_delete_children(dev); doom_vi(sc, &pi->vi[0]); if (pi->flags & HAS_TRACEQ) { sc->traceq = -1; /* cloner should not create ifnet */ t4_tracer_port_detach(sc); } cxgbe_vi_detach(&pi->vi[0]); ifmedia_removeall(&pi->media); end_synchronized_op(sc, 0); return (0); } static void cxgbe_init(void *arg) { struct vi_info *vi = arg; struct adapter *sc = vi->adapter; if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0) return; cxgbe_init_synchronized(vi); end_synchronized_op(sc, 0); } static int cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data) { int rc = 0, mtu, flags; struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct ifreq *ifr = (struct ifreq *)data; uint32_t mask; switch (cmd) { case SIOCSIFMTU: mtu = ifr->ifr_mtu; if (mtu < ETHERMIN || mtu > MAX_MTU) return (EINVAL); rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu"); if (rc) return (rc); ifp->if_mtu = mtu; if (vi->flags & VI_INIT_DONE) { t4_update_fl_bufsize(ifp); if (!hw_off_limits(sc) && ifp->if_drv_flags & IFF_DRV_RUNNING) rc = update_mac_settings(ifp, XGMAC_MTU); } end_synchronized_op(sc, 0); break; case SIOCSIFFLAGS: rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto fail; } if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { flags = vi->if_flags; if ((ifp->if_flags ^ flags) & (IFF_PROMISC | IFF_ALLMULTI)) { rc = update_mac_settings(ifp, XGMAC_PROMISC | XGMAC_ALLMULTI); } } else { rc = cxgbe_init_synchronized(vi); } vi->if_flags = ifp->if_flags; } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { rc = cxgbe_uninit_synchronized(vi); } end_synchronized_op(sc, 0); break; case SIOCADDMULTI: case SIOCDELMULTI: rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi"); if (rc) return (rc); if (!hw_off_limits(sc) && ifp->if_drv_flags & IFF_DRV_RUNNING) rc = update_mac_settings(ifp, XGMAC_MCADDRS); end_synchronized_op(sc, 0); break; case SIOCSIFCAP: rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap"); if (rc) return (rc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); if (IFCAP_TSO4 & ifp->if_capenable && !(IFCAP_TXCSUM & ifp->if_capenable)) { mask &= ~IFCAP_TSO4; ifp->if_capenable &= ~IFCAP_TSO4; if_printf(ifp, "tso4 disabled due to -txcsum.\n"); } } if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); if (IFCAP_TSO6 & ifp->if_capenable && !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { mask &= ~IFCAP_TSO6; ifp->if_capenable &= ~IFCAP_TSO6; if_printf(ifp, "tso6 disabled due to -txcsum6.\n"); } } if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; /* * Note that we leave CSUM_TSO alone (it is always set). 
The * kernel takes both IFCAP_TSOx and CSUM_TSO into account before * sending a TSO request our way, so it's sufficient to toggle * IFCAP_TSOx only. */ if (mask & IFCAP_TSO4) { if (!(IFCAP_TSO4 & ifp->if_capenable) && !(IFCAP_TXCSUM & ifp->if_capenable)) { if_printf(ifp, "enable txcsum first.\n"); rc = EAGAIN; goto fail; } ifp->if_capenable ^= IFCAP_TSO4; } if (mask & IFCAP_TSO6) { if (!(IFCAP_TSO6 & ifp->if_capenable) && !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { if_printf(ifp, "enable txcsum6 first.\n"); rc = EAGAIN; goto fail; } ifp->if_capenable ^= IFCAP_TSO6; } if (mask & IFCAP_LRO) { #if defined(INET) || defined(INET6) int i; struct sge_rxq *rxq; ifp->if_capenable ^= IFCAP_LRO; for_each_rxq(vi, i, rxq) { if (ifp->if_capenable & IFCAP_LRO) rxq->iq.flags |= IQ_LRO_ENABLED; else rxq->iq.flags &= ~IQ_LRO_ENABLED; } #endif } #ifdef TCP_OFFLOAD if (mask & IFCAP_TOE) { int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE; rc = toe_capability(vi, enable); if (rc != 0) goto fail; ifp->if_capenable ^= mask; } #endif if (mask & IFCAP_VLAN_HWTAGGING) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (ifp->if_drv_flags & IFF_DRV_RUNNING) rc = update_mac_settings(ifp, XGMAC_VLANEX); } if (mask & IFCAP_VLAN_MTU) { ifp->if_capenable ^= IFCAP_VLAN_MTU; /* Need to find out how to disable auto-mtu-inflation */ } if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (mask & IFCAP_VLAN_HWCSUM) ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; #ifdef RATELIMIT if (mask & IFCAP_TXRTLMT) ifp->if_capenable ^= IFCAP_TXRTLMT; #endif if (mask & IFCAP_HWRXTSTMP) { int i; struct sge_rxq *rxq; ifp->if_capenable ^= IFCAP_HWRXTSTMP; for_each_rxq(vi, i, rxq) { if (ifp->if_capenable & IFCAP_HWRXTSTMP) rxq->iq.flags |= IQ_RX_TIMESTAMP; else rxq->iq.flags &= ~IQ_RX_TIMESTAMP; } } if (mask & IFCAP_MEXTPG) ifp->if_capenable ^= IFCAP_MEXTPG; #ifdef KERN_TLS if (mask & IFCAP_TXTLS) { int enable = (ifp->if_capenable ^ mask) & IFCAP_TXTLS; rc = ktls_capability(sc, enable); if (rc != 0) goto fail; ifp->if_capenable ^= (mask & IFCAP_TXTLS); } #endif if (mask & IFCAP_VXLAN_HWCSUM) { ifp->if_capenable ^= IFCAP_VXLAN_HWCSUM; ifp->if_hwassist ^= CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP | CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP; } if (mask & IFCAP_VXLAN_HWTSO) { ifp->if_capenable ^= IFCAP_VXLAN_HWTSO; ifp->if_hwassist ^= CSUM_INNER_IP6_TSO | CSUM_INNER_IP_TSO; } #ifdef VLAN_CAPABILITIES VLAN_CAPABILITIES(ifp); #endif fail: end_synchronized_op(sc, 0); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: case SIOCGIFXMEDIA: ifmedia_ioctl(ifp, ifr, &pi->media, cmd); break; case SIOCGI2C: { struct ifi2creq i2c; rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); if (rc != 0) break; if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { rc = EPERM; break; } if (i2c.len > sizeof(i2c.data)) { rc = EINVAL; break; } rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr, i2c.offset, i2c.len, &i2c.data[0]); end_synchronized_op(sc, 0); if (rc == 0) rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); break; } default: rc = ether_ioctl(ifp, cmd, data); } return (rc); } static int cxgbe_transmit(struct ifnet *ifp, struct mbuf *m) { struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct adapter *sc; struct sge_txq *txq; void *items[1]; int rc; M_ASSERTPKTHDR(m); MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */ #if defined(KERN_TLS) || defined(RATELIMIT) if 
(m->m_pkthdr.csum_flags & CSUM_SND_TAG) MPASS(m->m_pkthdr.snd_tag->ifp == ifp); #endif if (__predict_false(pi->link_cfg.link_ok == false)) { m_freem(m); return (ENETDOWN); } rc = parse_pkt(&m, vi->flags & TX_USES_VM_WR); if (__predict_false(rc != 0)) { MPASS(m == NULL); /* was freed already */ atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */ return (rc); } #ifdef RATELIMIT if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) { if (m->m_pkthdr.snd_tag->type == IF_SND_TAG_TYPE_RATE_LIMIT) return (ethofld_transmit(ifp, m)); } #endif /* Select a txq. */ sc = vi->adapter; txq = &sc->sge.txq[vi->first_txq]; if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) + vi->rsrv_noflowq); items[0] = m; rc = mp_ring_enqueue(txq->r, items, 1, 256); if (__predict_false(rc != 0)) m_freem(m); return (rc); } static void cxgbe_qflush(struct ifnet *ifp) { struct vi_info *vi = ifp->if_softc; struct sge_txq *txq; int i; /* queues do not exist if !VI_INIT_DONE. */ if (vi->flags & VI_INIT_DONE) { for_each_txq(vi, i, txq) { TXQ_LOCK(txq); txq->eq.flags |= EQ_QFLUSH; TXQ_UNLOCK(txq); while (!mp_ring_is_idle(txq->r)) { mp_ring_check_drainage(txq->r, 4096); pause("qflush", 1); } TXQ_LOCK(txq); txq->eq.flags &= ~EQ_QFLUSH; TXQ_UNLOCK(txq); } } if_qflush(ifp); } static uint64_t vi_get_counter(struct ifnet *ifp, ift_counter c) { struct vi_info *vi = ifp->if_softc; struct fw_vi_stats_vf *s = &vi->stats; mtx_lock(&vi->tick_mtx); vi_refresh_stats(vi); mtx_unlock(&vi->tick_mtx); switch (c) { case IFCOUNTER_IPACKETS: return (s->rx_bcast_frames + s->rx_mcast_frames + s->rx_ucast_frames); case IFCOUNTER_IERRORS: return (s->rx_err_frames); case IFCOUNTER_OPACKETS: return (s->tx_bcast_frames + s->tx_mcast_frames + s->tx_ucast_frames + s->tx_offload_frames); case IFCOUNTER_OERRORS: return (s->tx_drop_frames); case IFCOUNTER_IBYTES: return (s->rx_bcast_bytes + s->rx_mcast_bytes + s->rx_ucast_bytes); case IFCOUNTER_OBYTES: return (s->tx_bcast_bytes + s->tx_mcast_bytes + s->tx_ucast_bytes + s->tx_offload_bytes); case IFCOUNTER_IMCASTS: return (s->rx_mcast_frames); case IFCOUNTER_OMCASTS: return (s->tx_mcast_frames); case IFCOUNTER_OQDROPS: { uint64_t drops; drops = 0; if (vi->flags & VI_INIT_DONE) { int i; struct sge_txq *txq; for_each_txq(vi, i, txq) drops += counter_u64_fetch(txq->r->dropped); } return (drops); } default: return (if_get_counter_default(ifp, c)); } } static uint64_t cxgbe_get_counter(struct ifnet *ifp, ift_counter c) { struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct port_stats *s = &pi->stats; mtx_lock(&vi->tick_mtx); cxgbe_refresh_stats(vi); mtx_unlock(&vi->tick_mtx); switch (c) { case IFCOUNTER_IPACKETS: return (s->rx_frames); case IFCOUNTER_IERRORS: return (s->rx_jabber + s->rx_runt + s->rx_too_long + s->rx_fcs_err + s->rx_len_err); case IFCOUNTER_OPACKETS: return (s->tx_frames); case IFCOUNTER_OERRORS: return (s->tx_error_frames); case IFCOUNTER_IBYTES: return (s->rx_octets); case IFCOUNTER_OBYTES: return (s->tx_octets); case IFCOUNTER_IMCASTS: return (s->rx_mcast_frames); case IFCOUNTER_OMCASTS: return (s->tx_mcast_frames); case IFCOUNTER_IQDROPS: return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 + s->rx_trunc3 + pi->tnl_cong_drops); case IFCOUNTER_OQDROPS: { uint64_t drops; drops = s->tx_drop; if (vi->flags & VI_INIT_DONE) { int i; struct sge_txq *txq; for_each_txq(vi, i, txq) drops += counter_u64_fetch(txq->r->dropped); } return (drops); } default: return 
(if_get_counter_default(ifp, c)); } } #if defined(KERN_TLS) || defined(RATELIMIT) static int cxgbe_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params, struct m_snd_tag **pt) { int error; switch (params->hdr.type) { #ifdef RATELIMIT case IF_SND_TAG_TYPE_RATE_LIMIT: error = cxgbe_rate_tag_alloc(ifp, params, pt); break; #endif #ifdef KERN_TLS case IF_SND_TAG_TYPE_TLS: error = cxgbe_tls_tag_alloc(ifp, params, pt); break; #endif default: error = EOPNOTSUPP; } return (error); } static int cxgbe_snd_tag_modify(struct m_snd_tag *mst, union if_snd_tag_modify_params *params) { switch (mst->type) { #ifdef RATELIMIT case IF_SND_TAG_TYPE_RATE_LIMIT: return (cxgbe_rate_tag_modify(mst, params)); #endif default: return (EOPNOTSUPP); } } static int cxgbe_snd_tag_query(struct m_snd_tag *mst, union if_snd_tag_query_params *params) { switch (mst->type) { #ifdef RATELIMIT case IF_SND_TAG_TYPE_RATE_LIMIT: return (cxgbe_rate_tag_query(mst, params)); #endif default: return (EOPNOTSUPP); } } static void cxgbe_snd_tag_free(struct m_snd_tag *mst) { switch (mst->type) { #ifdef RATELIMIT case IF_SND_TAG_TYPE_RATE_LIMIT: cxgbe_rate_tag_free(mst); return; #endif #ifdef KERN_TLS case IF_SND_TAG_TYPE_TLS: cxgbe_tls_tag_free(mst); return; #endif default: panic("shouldn't get here"); } } #endif /* * The kernel picks a media from the list we had provided but we still validate * the request. */ int cxgbe_media_change(struct ifnet *ifp) { struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct ifmedia *ifm = &pi->media; struct link_config *lc = &pi->link_cfg; struct adapter *sc = pi->adapter; int rc; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec"); if (rc != 0) return (rc); PORT_LOCK(pi); if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { /* ifconfig .. media autoselect */ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { rc = ENOTSUP; /* AN not supported by transceiver */ goto done; } lc->requested_aneg = AUTONEG_ENABLE; lc->requested_speed = 0; lc->requested_fc |= PAUSE_AUTONEG; } else { lc->requested_aneg = AUTONEG_DISABLE; lc->requested_speed = ifmedia_baudrate(ifm->ifm_media) / 1000000; lc->requested_fc = 0; if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE) lc->requested_fc |= PAUSE_RX; if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE) lc->requested_fc |= PAUSE_TX; } if (pi->up_vis > 0) { fixup_link_config(pi); rc = apply_link_config(pi); } done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); return (rc); } /* * Base media word (without ETHER, pause, link active, etc.) for the port at the * given speed. 
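* Returns IFM_NONE when no module is plugged in and IFM_UNKNOWN when the port type, module type, and speed do not map to a known media word.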
*/ static int port_mword(struct port_info *pi, uint32_t speed) { MPASS(speed & M_FW_PORT_CAP32_SPEED); MPASS(powerof2(speed)); switch(pi->port_type) { case FW_PORT_TYPE_BT_SGMII: case FW_PORT_TYPE_BT_XFI: case FW_PORT_TYPE_BT_XAUI: /* BaseT */ switch (speed) { case FW_PORT_CAP32_SPEED_100M: return (IFM_100_T); case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_T); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_T); } break; case FW_PORT_TYPE_KX4: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_KX4); break; case FW_PORT_TYPE_CX4: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_CX4); break; case FW_PORT_TYPE_KX: if (speed == FW_PORT_CAP32_SPEED_1G) return (IFM_1000_KX); break; case FW_PORT_TYPE_KR: case FW_PORT_TYPE_BP_AP: case FW_PORT_TYPE_BP4_AP: case FW_PORT_TYPE_BP40_BA: case FW_PORT_TYPE_KR4_100G: case FW_PORT_TYPE_KR_SFP28: case FW_PORT_TYPE_KR_XLAUI: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_KX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_KR); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_KR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_KR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_KR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_KR4); } break; case FW_PORT_TYPE_FIBER_XFI: case FW_PORT_TYPE_FIBER_XAUI: case FW_PORT_TYPE_SFP: case FW_PORT_TYPE_QSFP_10G: case FW_PORT_TYPE_QSA: case FW_PORT_TYPE_QSFP: case FW_PORT_TYPE_CR4_QSFP: case FW_PORT_TYPE_CR_QSFP: case FW_PORT_TYPE_CR2_QSFP: case FW_PORT_TYPE_SFP28: /* Pluggable transceiver */ switch (pi->mod_type) { case FW_PORT_MOD_TYPE_LR: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_LX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_LR); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_LR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_LR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_LR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_LR4); } break; case FW_PORT_MOD_TYPE_SR: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_SX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_SR); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_SR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_SR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_SR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_SR4); } break; case FW_PORT_MOD_TYPE_ER: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_ER); break; case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: switch (speed) { case FW_PORT_CAP32_SPEED_1G: return (IFM_1000_CX); case FW_PORT_CAP32_SPEED_10G: return (IFM_10G_TWINAX); case FW_PORT_CAP32_SPEED_25G: return (IFM_25G_CR); case FW_PORT_CAP32_SPEED_40G: return (IFM_40G_CR4); case FW_PORT_CAP32_SPEED_50G: return (IFM_50G_CR2); case FW_PORT_CAP32_SPEED_100G: return (IFM_100G_CR4); } break; case FW_PORT_MOD_TYPE_LRM: if (speed == FW_PORT_CAP32_SPEED_10G) return (IFM_10G_LRM); break; case FW_PORT_MOD_TYPE_NA: MPASS(0); /* Not pluggable? 
*/ /* fall through */ case FW_PORT_MOD_TYPE_ERROR: case FW_PORT_MOD_TYPE_UNKNOWN: case FW_PORT_MOD_TYPE_NOTSUPPORTED: break; case FW_PORT_MOD_TYPE_NONE: return (IFM_NONE); } break; case FW_PORT_TYPE_NONE: return (IFM_NONE); } return (IFM_UNKNOWN); } void cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4med") != 0) return; PORT_LOCK(pi); if (pi->up_vis == 0) { /* * If all the interfaces are administratively down the firmware * does not report transceiver changes. Refresh port info here * so that ifconfig displays accurate ifmedia at all times. * This is the only reason we have a synchronized op in this * function. Just PORT_LOCK would have been enough otherwise. */ t4_update_port_info(pi); build_medialist(pi); } /* ifm_status */ ifmr->ifm_status = IFM_AVALID; if (lc->link_ok == false) goto done; ifmr->ifm_status |= IFM_ACTIVE; /* ifm_active */ ifmr->ifm_active = IFM_ETHER | IFM_FDX; ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE); if (lc->fc & PAUSE_RX) ifmr->ifm_active |= IFM_ETH_RXPAUSE; if (lc->fc & PAUSE_TX) ifmr->ifm_active |= IFM_ETH_TXPAUSE; ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed)); done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); } static int vcxgbe_probe(device_t dev) { char buf[128]; struct vi_info *vi = device_get_softc(dev); snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id, vi - vi->pi->vi); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } static int alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi) { int func, index, rc; uint32_t param, val; ASSERT_SYNCHRONIZED_OP(sc); index = vi - pi->vi; MPASS(index > 0); /* This function deals with _extra_ VIs only */ KASSERT(index < nitems(vi_mac_funcs), ("%s: VI %s doesn't have a MAC func", __func__, device_get_nameunit(vi->dev))); func = vi_mac_funcs[index]; rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0); if (rc < 0) { CH_ERR(vi, "failed to allocate virtual interface %d " "for port %d: %d\n", index, pi->port_id, -rc); return (-rc); } vi->viid = rc; if (vi->rss_size == 1) { /* * This VI didn't get a slice of the RSS table. Reduce the * number of VIs being created (hw.cxgbe.num_vis) or modify the * configuration file (nvi, rssnvi for this PF) if this is a * problem. 
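* The VI is still functional without an RSS slice; it just cannot spread rx traffic across multiple queues.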
*/ device_printf(vi->dev, "RSS table not available.\n"); vi->rss_base = 0xffff; return (0); } param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | V_FW_PARAMS_PARAM_YZ(vi->viid); rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); if (rc) vi->rss_base = 0xffff; else { MPASS((val >> 16) == vi->rss_size); vi->rss_base = val & 0xffff; } return (0); } static int vcxgbe_attach(device_t dev) { struct vi_info *vi; struct port_info *pi; struct adapter *sc; int rc; vi = device_get_softc(dev); pi = vi->pi; sc = pi->adapter; rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via"); if (rc) return (rc); rc = alloc_extra_vi(sc, pi, vi); end_synchronized_op(sc, 0); if (rc) return (rc); rc = cxgbe_vi_attach(dev, vi); if (rc) { t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); return (rc); } return (0); } static int vcxgbe_detach(device_t dev) { struct vi_info *vi; struct adapter *sc; vi = device_get_softc(dev); sc = vi->adapter; doom_vi(sc, vi); cxgbe_vi_detach(vi); t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); end_synchronized_op(sc, 0); return (0); } static struct callout fatal_callout; static struct taskqueue *reset_tq; static void delayed_panic(void *arg) { struct adapter *sc = arg; panic("%s: panic on fatal error", device_get_nameunit(sc->dev)); } void t4_fatal_err(struct adapter *sc, bool fw_error) { t4_shutdown_adapter(sc); log(LOG_ALERT, "%s: encountered fatal error, adapter stopped.\n", device_get_nameunit(sc->dev)); if (fw_error) { if (sc->flags & CHK_MBOX_ACCESS) ASSERT_SYNCHRONIZED_OP(sc); sc->flags |= ADAP_ERR; } else { ADAPTER_LOCK(sc); sc->flags |= ADAP_ERR; ADAPTER_UNLOCK(sc); } #ifdef TCP_OFFLOAD taskqueue_enqueue(taskqueue_thread, &sc->async_event_task); #endif if (t4_panic_on_fatal_err) { CH_ALERT(sc, "panicking on fatal error (after 30s).\n"); callout_reset(&fatal_callout, hz * 30, delayed_panic, sc); } else if (t4_reset_on_fatal_err) { CH_ALERT(sc, "resetting on fatal error.\n"); taskqueue_enqueue(reset_tq, &sc->reset_task); } } void t4_add_adapter(struct adapter *sc) { sx_xlock(&t4_list_lock); SLIST_INSERT_HEAD(&t4_list, sc, link); sx_xunlock(&t4_list_lock); } int t4_map_bars_0_and_4(struct adapter *sc) { sc->regs_rid = PCIR_BAR(0); sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->regs_rid, RF_ACTIVE); if (sc->regs_res == NULL) { device_printf(sc->dev, "cannot map registers.\n"); return (ENXIO); } sc->bt = rman_get_bustag(sc->regs_res); sc->bh = rman_get_bushandle(sc->regs_res); sc->mmio_len = rman_get_size(sc->regs_res); setbit(&sc->doorbells, DOORBELL_KDB); sc->msix_rid = PCIR_BAR(4); sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->msix_rid, RF_ACTIVE); if (sc->msix_res == NULL) { device_printf(sc->dev, "cannot map MSI-X BAR.\n"); return (ENXIO); } return (0); } int t4_map_bar_2(struct adapter *sc) { /* * T4: only iWARP driver uses the userspace doorbells. There is no need * to map it if RDMA is disabled. */ if (is_t4(sc) && sc->rdmacaps == 0) return (0); sc->udbs_rid = PCIR_BAR(2); sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->udbs_rid, RF_ACTIVE); if (sc->udbs_res == NULL) { device_printf(sc->dev, "cannot map doorbell BAR.\n"); return (ENXIO); } sc->udbs_base = rman_get_virtual(sc->udbs_res); if (chip_id(sc) >= CHELSIO_T5) { setbit(&sc->doorbells, DOORBELL_UDB); #if defined(__i386__) || defined(__amd64__) if (t5_write_combine) { int rc, mode; /* * Enable write combining on BAR2. 
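* (Write combining lets the CPU merge the 64B doorbell stores into larger PCIe bursts, which is what makes the WCWR fast path below worthwhile.)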
This is the * userspace doorbell BAR and is split into 128B * (UDBS_SEG_SIZE) doorbell regions, each associated * with an egress queue. The first 64B has the doorbell * and the second 64B can be used to submit a tx work * request with an implicit doorbell. */ rc = pmap_change_attr((vm_offset_t)sc->udbs_base, rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); if (rc == 0) { clrbit(&sc->doorbells, DOORBELL_UDB); setbit(&sc->doorbells, DOORBELL_WCWR); setbit(&sc->doorbells, DOORBELL_UDBWC); } else { device_printf(sc->dev, "couldn't enable write combining: %d\n", rc); } mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0); t4_write_reg(sc, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) | mode); } #endif } sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0; return (0); } struct memwin_init { uint32_t base; uint32_t aperture; }; static const struct memwin_init t4_memwin[NUM_MEMWIN] = { { MEMWIN0_BASE, MEMWIN0_APERTURE }, { MEMWIN1_BASE, MEMWIN1_APERTURE }, { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } }; static const struct memwin_init t5_memwin[NUM_MEMWIN] = { { MEMWIN0_BASE, MEMWIN0_APERTURE }, { MEMWIN1_BASE, MEMWIN1_APERTURE }, { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, }; static void setup_memwin(struct adapter *sc) { const struct memwin_init *mw_init; struct memwin *mw; int i; uint32_t bar0; if (is_t4(sc)) { /* * Read low 32b of bar0 indirectly via the hardware backdoor * mechanism. Works from within PCI passthrough environments * too, where rman_get_start() can return a different value. We * need to program the T4 memory window decoders with the actual * addresses that will be coming across the PCIe link. */ bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; mw_init = &t4_memwin[0]; } else { /* T5+ use the relative offset inside the PCIe BAR */ bar0 = 0; mw_init = &t5_memwin[0]; } for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { if (!rw_initialized(&mw->mw_lock)) { rw_init(&mw->mw_lock, "memory window access"); mw->mw_base = mw_init->base; mw->mw_aperture = mw_init->aperture; mw->mw_curpos = 0; } t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), (mw->mw_base + bar0) | V_BIR(0) | V_WINDOW(ilog2(mw->mw_aperture) - 10)); rw_wlock(&mw->mw_lock); position_memwin(sc, i, mw->mw_curpos); rw_wunlock(&mw->mw_lock); } /* flush */ t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); } /* * Positions the memory window at the given address in the card's address space. * There are some alignment requirements and the actual position may be at an * address prior to the requested address. mw->mw_curpos always has the actual * position of the window. 
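* The caller must hold the window's lock as a writer to reposition it; readers may then access memory through the window concurrently.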
*/ static void position_memwin(struct adapter *sc, int idx, uint32_t addr) { struct memwin *mw; uint32_t pf; uint32_t reg; MPASS(idx >= 0 && idx < NUM_MEMWIN); mw = &sc->memwin[idx]; rw_assert(&mw->mw_lock, RA_WLOCKED); if (is_t4(sc)) { pf = 0; mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ } else { pf = V_PFNUM(sc->pf); mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ } reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); t4_write_reg(sc, reg, mw->mw_curpos | pf); t4_read_reg(sc, reg); /* flush */ } int rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, int len, int rw) { struct memwin *mw; uint32_t mw_end, v; MPASS(idx >= 0 && idx < NUM_MEMWIN); /* Memory can only be accessed in naturally aligned 4 byte units */ if (addr & 3 || len & 3 || len <= 0) return (EINVAL); mw = &sc->memwin[idx]; while (len > 0) { rw_rlock(&mw->mw_lock); mw_end = mw->mw_curpos + mw->mw_aperture; if (addr >= mw_end || addr < mw->mw_curpos) { /* Will need to reposition the window */ if (!rw_try_upgrade(&mw->mw_lock)) { rw_runlock(&mw->mw_lock); rw_wlock(&mw->mw_lock); } rw_assert(&mw->mw_lock, RA_WLOCKED); position_memwin(sc, idx, addr); rw_downgrade(&mw->mw_lock); mw_end = mw->mw_curpos + mw->mw_aperture; } rw_assert(&mw->mw_lock, RA_RLOCKED); while (addr < mw_end && len > 0) { if (rw == 0) { v = t4_read_reg(sc, mw->mw_base + addr - mw->mw_curpos); *val++ = le32toh(v); } else { v = *val++; t4_write_reg(sc, mw->mw_base + addr - mw->mw_curpos, htole32(v)); } addr += 4; len -= 4; } rw_runlock(&mw->mw_lock); } return (0); } static void t4_init_atid_table(struct adapter *sc) { struct tid_info *t; int i; t = &sc->tids; if (t->natids == 0) return; MPASS(t->atid_tab == NULL); t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE, M_ZERO | M_WAITOK); mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF); t->afree = t->atid_tab; t->atids_in_use = 0; for (i = 1; i < t->natids; i++) t->atid_tab[i - 1].next = &t->atid_tab[i]; t->atid_tab[t->natids - 1].next = NULL; } static void t4_free_atid_table(struct adapter *sc) { struct tid_info *t; t = &sc->tids; KASSERT(t->atids_in_use == 0, ("%s: %d atids still in use.", __func__, t->atids_in_use)); if (mtx_initialized(&t->atid_lock)) mtx_destroy(&t->atid_lock); free(t->atid_tab, M_CXGBE); t->atid_tab = NULL; } int alloc_atid(struct adapter *sc, void *ctx) { struct tid_info *t = &sc->tids; int atid = -1; mtx_lock(&t->atid_lock); if (t->afree) { union aopen_entry *p = t->afree; atid = p - t->atid_tab; MPASS(atid <= M_TID_TID); t->afree = p->next; p->data = ctx; t->atids_in_use++; } mtx_unlock(&t->atid_lock); return (atid); } void * lookup_atid(struct adapter *sc, int atid) { struct tid_info *t = &sc->tids; return (t->atid_tab[atid].data); } void free_atid(struct adapter *sc, int atid) { struct tid_info *t = &sc->tids; union aopen_entry *p = &t->atid_tab[atid]; mtx_lock(&t->atid_lock); p->next = t->afree; t->afree = p; t->atids_in_use--; mtx_unlock(&t->atid_lock); } static void queue_tid_release(struct adapter *sc, int tid) { CXGBE_UNIMPLEMENTED("deferred tid release"); } void release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq) { struct wrqe *wr; struct cpl_tid_release *req; wr = alloc_wrqe(sizeof(*req), ctrlq); if (wr == NULL) { queue_tid_release(sc, tid); /* defer */ return; } req = wrtod(wr); INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid); t4_wrq_tx(sc, wr); } static int t4_range_cmp(const void *a, const void *b) { return ((const struct t4_range *)a)->start - ((const struct t4_range *)b)->start; } /* * Verify 
that the memory range specified by the addr/len pair is valid within * the card's address space. */ static int validate_mem_range(struct adapter *sc, uint32_t addr, uint32_t len) { struct t4_range mem_ranges[4], *r, *next; uint32_t em, addr_len; int i, n, remaining; /* Memory can only be accessed in naturally aligned 4 byte units */ if (addr & 3 || len & 3 || len == 0) return (EINVAL); /* Enabled memories */ em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); r = &mem_ranges[0]; n = 0; bzero(r, sizeof(mem_ranges)); if (em & F_EDRAM0_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); r->size = G_EDRAM0_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EDRAM0_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } if (em & F_EDRAM1_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); r->size = G_EDRAM1_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EDRAM1_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } if (em & F_EXT_MEM_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); r->size = G_EXT_MEM_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EXT_MEM_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); r->size = G_EXT_MEM1_SIZE(addr_len) << 20; if (r->size > 0) { r->start = G_EXT_MEM1_BASE(addr_len) << 20; if (addr >= r->start && addr + len <= r->start + r->size) return (0); r++; n++; } } MPASS(n <= nitems(mem_ranges)); if (n > 1) { /* Sort and merge the ranges. */ qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); /* Start from index 0 and examine the next n - 1 entries. */ r = &mem_ranges[0]; for (remaining = n - 1; remaining > 0; remaining--, r++) { MPASS(r->size > 0); /* r is a valid entry. */ next = r + 1; MPASS(next->size > 0); /* and so is the next one. */ while (r->start + r->size >= next->start) { /* Merge the next one into the current entry. */ r->size = max(r->start + r->size, next->start + next->size) - r->start; n--; /* One fewer entry in total. */ if (--remaining == 0) goto done; /* short circuit */ next++; } if (next != r + 1) { /* * Some entries were merged into r and next * points to the first valid entry that couldn't * be merged. */ MPASS(next->size > 0); /* must be valid */ memcpy(r + 1, next, remaining * sizeof(*r)); #ifdef INVARIANTS /* * This is so that the foo->size assertion in the * next iteration of the loop does the right * thing for entries that were pulled up and are * no longer valid. */ MPASS(n < nitems(mem_ranges)); bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * sizeof(struct t4_range)); #endif } } done: /* Done merging the ranges. */ MPASS(n > 0); r = &mem_ranges[0]; for (i = 0; i < n; i++, r++) { if (addr >= r->start && addr + len <= r->start + r->size) return (0); } } return (EFAULT); } static int fwmtype_to_hwmtype(int mtype) { switch (mtype) { case FW_MEMTYPE_EDC0: return (MEM_EDC0); case FW_MEMTYPE_EDC1: return (MEM_EDC1); case FW_MEMTYPE_EXTMEM: return (MEM_MC0); case FW_MEMTYPE_EXTMEM1: return (MEM_MC1); default: panic("%s: cannot translate fw mtype %d.", __func__, mtype); } } /* * Verify that the memory range specified by the memtype/offset/len pair is * valid and lies entirely within the memtype specified. The global address of * the start of the range is returned in addr. 
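* The range check itself is delegated to validate_mem_range once the offset has been converted to a global address.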
*/ static int validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, uint32_t len, uint32_t *addr) { uint32_t em, addr_len, maddr; /* Memory can only be accessed in naturally aligned 4 byte units */ if (off & 3 || len & 3 || len == 0) return (EINVAL); em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); switch (fwmtype_to_hwmtype(mtype)) { case MEM_EDC0: if (!(em & F_EDRAM0_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); maddr = G_EDRAM0_BASE(addr_len) << 20; break; case MEM_EDC1: if (!(em & F_EDRAM1_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); maddr = G_EDRAM1_BASE(addr_len) << 20; break; case MEM_MC: if (!(em & F_EXT_MEM_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); maddr = G_EXT_MEM_BASE(addr_len) << 20; break; case MEM_MC1: if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) return (EINVAL); addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); maddr = G_EXT_MEM1_BASE(addr_len) << 20; break; default: return (EINVAL); } *addr = maddr + off; /* global address */ return (validate_mem_range(sc, *addr, len)); } static int fixup_devlog_params(struct adapter *sc) { struct devlog_params *dparams = &sc->params.devlog; int rc; rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, dparams->size, &dparams->addr); return (rc); } static void update_nirq(struct intrs_and_queues *iaq, int nports) { iaq->nirq = T4_EXTRA_INTR; iaq->nirq += nports * max(iaq->nrxq, iaq->nnmrxq); iaq->nirq += nports * iaq->nofldrxq; iaq->nirq += nports * (iaq->num_vis - 1) * max(iaq->nrxq_vi, iaq->nnmrxq_vi); iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi; } /* * Adjust requirements to fit the number of interrupts available. */ static void calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype, int navail) { int old_nirq; const int nports = sc->params.nports; MPASS(nports > 0); MPASS(navail > 0); bzero(iaq, sizeof(*iaq)); iaq->intr_type = itype; iaq->num_vis = t4_num_vis; iaq->ntxq = t4_ntxq; iaq->ntxq_vi = t4_ntxq_vi; iaq->nrxq = t4_nrxq; iaq->nrxq_vi = t4_nrxq_vi; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) if (is_offload(sc) || is_ethoffload(sc)) { iaq->nofldtxq = t4_nofldtxq; iaq->nofldtxq_vi = t4_nofldtxq_vi; } #endif #ifdef TCP_OFFLOAD if (is_offload(sc)) { iaq->nofldrxq = t4_nofldrxq; iaq->nofldrxq_vi = t4_nofldrxq_vi; } #endif #ifdef DEV_NETMAP if (t4_native_netmap & NN_MAIN_VI) { iaq->nnmtxq = t4_nnmtxq; iaq->nnmrxq = t4_nnmrxq; } if (t4_native_netmap & NN_EXTRA_VI) { iaq->nnmtxq_vi = t4_nnmtxq_vi; iaq->nnmrxq_vi = t4_nnmrxq_vi; } #endif update_nirq(iaq, nports); if (iaq->nirq <= navail && (itype != INTR_MSI || powerof2(iaq->nirq))) { /* * This is the normal case -- there are enough interrupts for * everything. */ goto done; } /* * If extra VIs have been configured try reducing their count and see if * that works. */ while (iaq->num_vis > 1) { iaq->num_vis--; update_nirq(iaq, nports); if (iaq->nirq <= navail && (itype != INTR_MSI || powerof2(iaq->nirq))) { device_printf(sc->dev, "virtual interfaces per port " "reduced to %d from %d. nrxq=%u, nofldrxq=%u, " "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. " "itype %d, navail %u, nirq %d.\n", iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi, itype, navail, iaq->nirq); goto done; } } /* * Extra VIs will not be created. Log a message if they were requested. 
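* All per-VI queue counts are zeroed so the sizing logic below never sees stale values from the abandoned configuration.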
*/ MPASS(iaq->num_vis == 1); iaq->ntxq_vi = iaq->nrxq_vi = 0; iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; if (iaq->num_vis != t4_num_vis) { device_printf(sc->dev, "extra virtual interfaces disabled. " "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, " "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n", iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi, itype, navail, iaq->nirq); } /* * Keep reducing the number of NIC rx queues to the next lower power of * 2 (for even RSS distribution) and halving the TOE rx queues and see * if that works. */ do { if (iaq->nrxq > 1) { do { iaq->nrxq--; } while (!powerof2(iaq->nrxq)); if (iaq->nnmrxq > iaq->nrxq) iaq->nnmrxq = iaq->nrxq; } if (iaq->nofldrxq > 1) iaq->nofldrxq >>= 1; old_nirq = iaq->nirq; update_nirq(iaq, nports); if (iaq->nirq <= navail && (itype != INTR_MSI || powerof2(iaq->nirq))) { device_printf(sc->dev, "running with reduced number of " "rx queues because of shortage of interrupts. " "nrxq=%u, nofldrxq=%u. " "itype %d, navail %u, nirq %d.\n", iaq->nrxq, iaq->nofldrxq, itype, navail, iaq->nirq); goto done; } } while (old_nirq != iaq->nirq); /* One interrupt for everything. Ugh. */ device_printf(sc->dev, "running with minimal number of queues. " "itype %d, navail %u.\n", itype, navail); iaq->nirq = 1; iaq->nrxq = 1; iaq->ntxq = 1; if (iaq->nofldrxq > 0) { iaq->nofldrxq = 1; iaq->nofldtxq = 1; } iaq->nnmtxq = 0; iaq->nnmrxq = 0; done: MPASS(iaq->num_vis > 0); if (iaq->num_vis > 1) { MPASS(iaq->nrxq_vi > 0); MPASS(iaq->ntxq_vi > 0); } MPASS(iaq->nirq > 0); MPASS(iaq->nrxq > 0); MPASS(iaq->ntxq > 0); if (itype == INTR_MSI) { MPASS(powerof2(iaq->nirq)); } } static int cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq) { int rc, itype, navail, nalloc; for (itype = INTR_MSIX; itype; itype >>= 1) { if ((itype & t4_intr_types) == 0) continue; /* not allowed */ if (itype == INTR_MSIX) navail = pci_msix_count(sc->dev); else if (itype == INTR_MSI) navail = pci_msi_count(sc->dev); else navail = 1; restart: if (navail == 0) continue; calculate_iaq(sc, iaq, itype, navail); nalloc = iaq->nirq; rc = 0; if (itype == INTR_MSIX) rc = pci_alloc_msix(sc->dev, &nalloc); else if (itype == INTR_MSI) rc = pci_alloc_msi(sc->dev, &nalloc); if (rc == 0 && nalloc > 0) { if (nalloc == iaq->nirq) return (0); /* * Didn't get the number requested. Use whatever number * the kernel is willing to allocate. */ device_printf(sc->dev, "fewer vectors than requested, " "type=%d, req=%d, rcvd=%d; will downshift req.\n", itype, iaq->nirq, nalloc); pci_release_msi(sc->dev); navail = nalloc; goto restart; } device_printf(sc->dev, "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", itype, rc, iaq->nirq, nalloc); } device_printf(sc->dev, "failed to find a usable interrupt type. " "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, pci_msix_count(sc->dev), pci_msi_count(sc->dev)); return (ENXIO); } #define FW_VERSION(chip) ( \ V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) /* Just enough of fw_hdr to cover all version info. 
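* Keeping a trimmed copy avoids depending on the full fw_hdr layout; the CTASSERTs that follow verify that the two structs stay in sync at the fields that matter.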
*/ struct fw_h { __u8 ver; __u8 chip; __be16 len512; __be32 fw_ver; __be32 tp_microcode_ver; __u8 intfver_nic; __u8 intfver_vnic; __u8 intfver_ofld; __u8 intfver_ri; __u8 intfver_iscsipdu; __u8 intfver_iscsi; __u8 intfver_fcoepdu; __u8 intfver_fcoe; }; /* Spot check a couple of fields. */ CTASSERT(offsetof(struct fw_h, fw_ver) == offsetof(struct fw_hdr, fw_ver)); CTASSERT(offsetof(struct fw_h, intfver_nic) == offsetof(struct fw_hdr, intfver_nic)); CTASSERT(offsetof(struct fw_h, intfver_fcoe) == offsetof(struct fw_hdr, intfver_fcoe)); struct fw_info { uint8_t chip; char *kld_name; char *fw_mod_name; struct fw_h fw_h; } fw_info[] = { { .chip = CHELSIO_T4, .kld_name = "t4fw_cfg", .fw_mod_name = "t4fw", .fw_h = { .chip = FW_HDR_CHIP_T4, .fw_ver = htobe32(FW_VERSION(T4)), .intfver_nic = FW_INTFVER(T4, NIC), .intfver_vnic = FW_INTFVER(T4, VNIC), .intfver_ofld = FW_INTFVER(T4, OFLD), .intfver_ri = FW_INTFVER(T4, RI), .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), .intfver_iscsi = FW_INTFVER(T4, ISCSI), .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), .intfver_fcoe = FW_INTFVER(T4, FCOE), }, }, { .chip = CHELSIO_T5, .kld_name = "t5fw_cfg", .fw_mod_name = "t5fw", .fw_h = { .chip = FW_HDR_CHIP_T5, .fw_ver = htobe32(FW_VERSION(T5)), .intfver_nic = FW_INTFVER(T5, NIC), .intfver_vnic = FW_INTFVER(T5, VNIC), .intfver_ofld = FW_INTFVER(T5, OFLD), .intfver_ri = FW_INTFVER(T5, RI), .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), .intfver_iscsi = FW_INTFVER(T5, ISCSI), .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), .intfver_fcoe = FW_INTFVER(T5, FCOE), }, }, { .chip = CHELSIO_T6, .kld_name = "t6fw_cfg", .fw_mod_name = "t6fw", .fw_h = { .chip = FW_HDR_CHIP_T6, .fw_ver = htobe32(FW_VERSION(T6)), .intfver_nic = FW_INTFVER(T6, NIC), .intfver_vnic = FW_INTFVER(T6, VNIC), .intfver_ofld = FW_INTFVER(T6, OFLD), .intfver_ri = FW_INTFVER(T6, RI), .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), .intfver_iscsi = FW_INTFVER(T6, ISCSI), .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), .intfver_fcoe = FW_INTFVER(T6, FCOE), }, } }; static struct fw_info * find_fw_info(int chip) { int i; for (i = 0; i < nitems(fw_info); i++) { if (fw_info[i].chip == chip) return (&fw_info[i]); } return (NULL); } /* * Is the given firmware API compatible with the one the driver was compiled * with? */ static int fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2) { /* short circuit if it's the exact same firmware version */ if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) return (1); /* * XXX: Is this too conservative? Perhaps I should limit this to the * features that are supported in the driver. 
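	 *
	 * As written, two firmwares are treated as compatible when they are
	 * for the same chip and every per-module interface version (NIC,
	 * VNIC, OFLD, RI, iSCSI, FCoE, ...) matches, even if their release
	 * numbers differ.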
	 */
#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
		return (1);
#undef SAME_INTF

	return (0);
}

static int
load_fw_module(struct adapter *sc, const struct firmware **dcfg,
    const struct firmware **fw)
{
	struct fw_info *fw_info;

	*dcfg = NULL;
	if (fw != NULL)
		*fw = NULL;

	fw_info = find_fw_info(chip_id(sc));
	if (fw_info == NULL) {
		device_printf(sc->dev,
		    "unable to look up firmware information for chip %d.\n",
		    chip_id(sc));
		return (EINVAL);
	}

	*dcfg = firmware_get(fw_info->kld_name);
	if (*dcfg != NULL) {
		if (fw != NULL)
			*fw = firmware_get(fw_info->fw_mod_name);
		return (0);
	}

	return (ENOENT);
}

static void
unload_fw_module(struct adapter *sc, const struct firmware *dcfg,
    const struct firmware *fw)
{
	if (fw != NULL)
		firmware_put(fw, FIRMWARE_UNLOAD);
	if (dcfg != NULL)
		firmware_put(dcfg, FIRMWARE_UNLOAD);
}

/*
 * Return values:
 * 0 means no firmware install attempted.
 * ERESTART means a firmware install was attempted and was successful.
 * +ve errno means a firmware install was attempted but failed.
 */
static int
install_kld_firmware(struct adapter *sc, struct fw_h *card_fw,
    const struct fw_h *drv_fw, const char *reason, int *already)
{
	const struct firmware *cfg, *fw;
	const uint32_t c = be32toh(card_fw->fw_ver);
	uint32_t d, k;
	int rc, fw_install;
	struct fw_h bundled_fw;
	bool load_attempted;

	cfg = fw = NULL;
	load_attempted = false;
	fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install;

	memcpy(&bundled_fw, drv_fw, sizeof(bundled_fw));
	if (t4_fw_install < 0) {
		rc = load_fw_module(sc, &cfg, &fw);
		if (rc != 0 || fw == NULL) {
			device_printf(sc->dev,
			    "failed to load firmware module: %d. cfg %p, fw %p; "
			    "will use compiled-in firmware version for "
			    "hw.cxgbe.fw_install checks.\n", rc, cfg, fw);
		} else {
			memcpy(&bundled_fw, fw->data, sizeof(bundled_fw));
		}
		load_attempted = true;
	}
	d = be32toh(bundled_fw.fw_ver);

	if (reason != NULL)
		goto install;

	if ((sc->flags & FW_OK) == 0) {

		if (c == 0xffffffff) {
			reason = "missing";
			goto install;
		}

		rc = 0;
		goto done;
	}

	if (!fw_compatible(card_fw, &bundled_fw)) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (d > c) {
		reason = "older than the version bundled with this driver";
		goto install;
	}

	if (fw_install == 2 && d != c) {
		reason = "different than the version bundled with this driver";
		goto install;
	}

	/* No reason to do anything to the firmware already on the card. */
	rc = 0;
	goto done;

install:
	rc = 0;
	if ((*already)++)
		goto done;

	if (fw_install == 0) {
		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
		    "but the driver is prohibited from installing a firmware "
		    "on the card.\n",
		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
		goto done;
	}

	/*
	 * We'll attempt to install a firmware.  Load the module first (if it
	 * hasn't been loaded already).
	 */
	if (!load_attempted) {
		rc = load_fw_module(sc, &cfg, &fw);
		if (rc != 0 || fw == NULL) {
			device_printf(sc->dev,
			    "failed to load firmware module: %d. cfg %p, fw %p\n",
			    rc, cfg, fw);
			/* carry on */
		}
	}
	if (fw == NULL) {
		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
		    "but the driver cannot take corrective action because it "
		    "is unable to load the firmware module.\n",
		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
		rc = sc->flags & FW_OK ?
0 : ENOENT; goto done; } k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver); if (k != d) { MPASS(t4_fw_install > 0); device_printf(sc->dev, "firmware in KLD (%u.%u.%u.%u) is not what the driver was " "expecting (%u.%u.%u.%u) and will not be used.\n", G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k), G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); rc = sc->flags & FW_OK ? 0 : EINVAL; goto done; } device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " "installing firmware %u.%u.%u.%u on card.\n", G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); if (rc != 0) { device_printf(sc->dev, "failed to install firmware: %d\n", rc); } else { /* Installed successfully, update the cached header too. */ rc = ERESTART; memcpy(card_fw, fw->data, sizeof(*card_fw)); } done: unload_fw_module(sc, cfg, fw); return (rc); } /* * Establish contact with the firmware and attempt to become the master driver. * * A firmware will be installed to the card if needed (if the driver is allowed * to do so). */ static int contact_firmware(struct adapter *sc) { int rc, already = 0; enum dev_state state; struct fw_info *fw_info; struct fw_hdr *card_fw; /* fw on the card */ const struct fw_h *drv_fw; fw_info = find_fw_info(chip_id(sc)); if (fw_info == NULL) { device_printf(sc->dev, "unable to look up firmware information for chip %d.\n", chip_id(sc)); return (EINVAL); } drv_fw = &fw_info->fw_h; /* Read the header of the firmware on the card */ card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); restart: rc = -t4_get_fw_hdr(sc, card_fw); if (rc != 0) { device_printf(sc->dev, "unable to read firmware header from card's flash: %d\n", rc); goto done; } rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL, &already); if (rc == ERESTART) goto restart; if (rc != 0) goto done; rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); if (rc < 0 || state == DEV_STATE_ERR) { rc = -rc; device_printf(sc->dev, "failed to connect to the firmware: %d, %d. " "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); #if 0 if (install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, "not responding properly to HELLO", &already) == ERESTART) goto restart; #endif goto done; } MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT); sc->flags |= FW_OK; /* The firmware responded to the FW_HELLO. */ if (rc == sc->pf) { sc->flags |= MASTER_PF; rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL, &already); if (rc == ERESTART) rc = 0; else if (rc != 0) goto done; } else if (state == DEV_STATE_UNINIT) { /* * We didn't get to be the master so we definitely won't be * configuring the chip. It's a bug if someone else hasn't * configured it already. */ device_printf(sc->dev, "couldn't be master(%d), " "device not already initialized either(%d). " "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); rc = EPROTO; goto done; } else { /* * Some other PF is the master and has configured the chip. * This is allowed but untested. */ device_printf(sc->dev, "PF%d is master, device state %d. 
" "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc); sc->cfcsum = 0; rc = 0; } done: if (rc != 0 && sc->flags & FW_OK) { t4_fw_bye(sc, sc->mbox); sc->flags &= ~FW_OK; } free(card_fw, M_CXGBE); return (rc); } static int copy_cfg_file_to_card(struct adapter *sc, char *cfg_file, uint32_t mtype, uint32_t moff) { struct fw_info *fw_info; const struct firmware *dcfg, *rcfg = NULL; const uint32_t *cfdata; uint32_t cflen, addr; int rc; load_fw_module(sc, &dcfg, NULL); /* Card specific interpretation of "default". */ if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { if (pci_get_device(sc->dev) == 0x440a) snprintf(cfg_file, sizeof(t4_cfg_file), UWIRE_CF); if (is_fpga(sc)) snprintf(cfg_file, sizeof(t4_cfg_file), FPGA_CF); } if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { if (dcfg == NULL) { device_printf(sc->dev, "KLD with default config is not available.\n"); rc = ENOENT; goto done; } cfdata = dcfg->data; cflen = dcfg->datasize & ~3; } else { char s[32]; fw_info = find_fw_info(chip_id(sc)); if (fw_info == NULL) { device_printf(sc->dev, "unable to look up firmware information for chip %d.\n", chip_id(sc)); rc = EINVAL; goto done; } snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file); rcfg = firmware_get(s); if (rcfg == NULL) { device_printf(sc->dev, "unable to load module \"%s\" for configuration " "profile \"%s\".\n", s, cfg_file); rc = ENOENT; goto done; } cfdata = rcfg->data; cflen = rcfg->datasize & ~3; } if (cflen > FLASH_CFG_MAX_SIZE) { device_printf(sc->dev, "config file too long (%d, max allowed is %d).\n", cflen, FLASH_CFG_MAX_SIZE); rc = EINVAL; goto done; } rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); if (rc != 0) { device_printf(sc->dev, "%s: addr (%d/0x%x) or len %d is not valid: %d.\n", __func__, mtype, moff, cflen, rc); rc = EINVAL; goto done; } write_via_memwin(sc, 2, addr, cfdata, cflen); done: if (rcfg != NULL) firmware_put(rcfg, FIRMWARE_UNLOAD); unload_fw_module(sc, dcfg, NULL); return (rc); } struct caps_allowed { uint16_t nbmcaps; uint16_t linkcaps; uint16_t switchcaps; uint16_t niccaps; uint16_t toecaps; uint16_t rdmacaps; uint16_t cryptocaps; uint16_t iscsicaps; uint16_t fcoecaps; }; #define FW_PARAM_DEV(param) \ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) #define FW_PARAM_PFVF(param) \ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) /* * Provide a configuration profile to the firmware and have it initialize the * chip accordingly. This may involve uploading a configuration file to the * card. 
 */
static int
apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
    const struct caps_allowed *caps_allowed)
{
	int rc;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, moff, finicsum, cfcsum, param, val;

	rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
	if (rc != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		return (rc);
	}

	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	if (strncmp(cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0) {
		mtype = 0;
		moff = 0;
		caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	} else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) {
		mtype = FW_MEMTYPE_FLASH;
		moff = t4_flash_cfg_addr(sc);
		caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
		    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
		    FW_LEN16(caps));
	} else {
		/*
		 * Ask the firmware where it wants us to upload the config file.
		 */
		param = FW_PARAM_DEV(CF);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* No support for config file?  Shouldn't happen. */
			device_printf(sc->dev,
			    "failed to query config file location: %d.\n", rc);
			goto done;
		}
		mtype = G_FW_PARAMS_PARAM_Y(val);
		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
		caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
		    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
		    FW_LEN16(caps));

		rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to upload config file to card: %d.\n", rc);
			goto done;
		}
	}
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev, "failed to pre-process config file: %d "
		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
		goto done;
	}

	finicsum = be32toh(caps.finicsum);
	cfcsum = be32toh(caps.cfcsum);	/* actual */
	if (finicsum != cfcsum) {
		device_printf(sc->dev,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;
	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file);

	/*
	 * Let the firmware know what features will (not) be used so it can
	 * tune things accordingly.
	 */
#define LIMIT_CAPS(x) do { \
	caps.x##caps &= htobe16(caps_allowed->x##caps); \
} while (0)
	LIMIT_CAPS(nbm);
	LIMIT_CAPS(link);
	LIMIT_CAPS(switch);
	LIMIT_CAPS(nic);
	LIMIT_CAPS(toe);
	LIMIT_CAPS(rdma);
	LIMIT_CAPS(crypto);
	LIMIT_CAPS(iscsi);
	LIMIT_CAPS(fcoe);
#undef LIMIT_CAPS
	if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
		/*
		 * TOE and hashfilters are mutually exclusive.  It is a config
		 * file or firmware bug if both are reported as available.  Try
		 * to cope with the situation in non-debug builds by disabling
		 * TOE.
		 */
		MPASS(caps.toecaps == 0);

		caps.toecaps = 0;
		caps.rdmacaps = 0;
		caps.iscsicaps = 0;
	}

	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to process config file: %d.\n", rc);
		goto done;
	}

	t4_tweak_chip_settings(sc);
	set_params__pre_init(sc);

	/* get basic stuff going */
	rc = -t4_fw_initialize(sc, sc->mbox);
	if (rc != 0) {
		device_printf(sc->dev, "fw_initialize failed: %d.\n", rc);
		goto done;
	}
done:
	return (rc);
}

/*
 * Partition chip resources for use between various PFs, VFs, etc.
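 *
 * If a custom profile fails, this retries once with the firmware's
 * built-in configuration and a minimal capability set (NIC plus the
 * allowed switch caps), unless DF_DISABLE_CFG_RETRY is set in the debug
 * flags.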
 */
static int
partition_resources(struct adapter *sc)
{
	char cfg_file[sizeof(t4_cfg_file)];
	struct caps_allowed caps_allowed;
	int rc;
	bool fallback;

	/* Only the master driver gets to configure the chip resources. */
	MPASS(sc->flags & MASTER_PF);

#define COPY_CAPS(x) do { \
	caps_allowed.x##caps = t4_##x##caps_allowed; \
} while (0)
	bzero(&caps_allowed, sizeof(caps_allowed));
	COPY_CAPS(nbm);
	COPY_CAPS(link);
	COPY_CAPS(switch);
	COPY_CAPS(nic);
	COPY_CAPS(toe);
	COPY_CAPS(rdma);
	COPY_CAPS(crypto);
	COPY_CAPS(iscsi);
	COPY_CAPS(fcoe);
	fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true;
	snprintf(cfg_file, sizeof(cfg_file), "%s", t4_cfg_file);
retry:
	rc = apply_cfg_and_initialize(sc, cfg_file, &caps_allowed);
	if (rc != 0 && fallback) {
		device_printf(sc->dev,
		    "failed (%d) to configure card with \"%s\" profile, "
		    "will fall back to a basic configuration and retry.\n",
		    rc, cfg_file);
		snprintf(cfg_file, sizeof(cfg_file), "%s", BUILTIN_CF);
		bzero(&caps_allowed, sizeof(caps_allowed));
		COPY_CAPS(switch);
		caps_allowed.niccaps = FW_CAPS_CONFIG_NIC;
		fallback = false;
		goto retry;
	}
#undef COPY_CAPS

	return (rc);
}

/*
 * Retrieve parameters that are needed (or nice to have) very early.
 */
static int
get_params__pre_init(struct adapter *sc)
{
	int rc;
	uint32_t param[2], val[2];

	t4_get_version_info(sc);

	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));

	snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));

	snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));

	snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));

	param[0] = FW_PARAM_DEV(PORTVEC);
	param[1] = FW_PARAM_DEV(CCLK);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (pre_init): %d.\n", rc);
		return (rc);
	}

	sc->params.portvec = val[0];
	sc->params.nports = bitcount32(val[0]);
	sc->params.vpd.cclk = val[1];

	/* Read device log parameters. */
	rc = -t4_init_devlog_params(sc, 1);
	if (rc == 0)
		fixup_devlog_params(sc);
	else {
		device_printf(sc->dev,
		    "failed to get devlog parameters: %d.\n", rc);
		rc = 0;	/* devlog isn't critical for device operation */
	}

	return (rc);
}

/*
 * Any params that need to be set before FW_INITIALIZE.
 */
static int
set_params__pre_init(struct adapter *sc)
{
	int rc = 0;
	uint32_t param, val;

	if (chip_id(sc) >= CHELSIO_T6) {
		param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
		val = 1;
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		/* firmwares < 1.20.1.0 do not have this param. */
		if (rc == FW_EINVAL &&
		    sc->params.fw_vers < FW_VERSION32(1, 20, 1, 0)) {
			rc = 0;
		}
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to enable high priority filters: %d.\n",
			    rc);
		}
	}

	/*
	 * Enable opaque VIIDs with firmwares that support it.
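	 * The set-param doubles as a probe: if the firmware accepts it and
	 * hands back val == 1, viid_smt_extn_support is recorded below.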
	 */
	param = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
	val = 1;
	rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc == 0 && val == 1)
		sc->params.viid_smt_extn_support = true;
	else
		sc->params.viid_smt_extn_support = false;

	return (rc);
}

/*
 * Retrieve various parameters that are of interest to the driver.  The device
 * has been initialized by the firmware at this point.
 */
static int
get_params__post_init(struct adapter *sc)
{
	int rc;
	uint32_t param[7], val[7];
	struct fw_caps_config_cmd caps;

	param[0] = FW_PARAM_PFVF(IQFLINT_START);
	param[1] = FW_PARAM_PFVF(EQ_START);
	param[2] = FW_PARAM_PFVF(FILTER_START);
	param[3] = FW_PARAM_PFVF(FILTER_END);
	param[4] = FW_PARAM_PFVF(L2T_START);
	param[5] = FW_PARAM_PFVF(L2T_END);
	param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (post_init): %d.\n", rc);
		return (rc);
	}

	sc->sge.iq_start = val[0];
	sc->sge.eq_start = val[1];
	if ((int)val[3] > (int)val[2]) {
		sc->tids.ftid_base = val[2];
		sc->tids.ftid_end = val[3];
		sc->tids.nftids = val[3] - val[2] + 1;
	}
	sc->vres.l2t.start = val[4];
	sc->vres.l2t.size = val[5] - val[4] + 1;
	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
	    ("%s: L2 table size (%u) larger than expected (%u)",
	    __func__, sc->vres.l2t.size, L2T_SIZE));
	sc->params.core_vdd = val[6];

	param[0] = FW_PARAM_PFVF(IQFLINT_END);
	param[1] = FW_PARAM_PFVF(EQ_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (post_init2): %d.\n", rc);
		return (rc);
	}
	MPASS((int)val[0] >= sc->sge.iq_start);
	sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1;
	MPASS((int)val[1] >= sc->sge.eq_start);
	sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1;

	if (chip_id(sc) >= CHELSIO_T6) {
		sc->tids.tid_base = t4_read_reg(sc,
		    A_LE_DB_ACTIVE_TABLE_START_INDEX);

		param[0] = FW_PARAM_PFVF(HPFILTER_START);
		param[1] = FW_PARAM_PFVF(HPFILTER_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query hpfilter parameters: %d.\n", rc);
			return (rc);
		}
		if ((int)val[1] > (int)val[0]) {
			sc->tids.hpftid_base = val[0];
			sc->tids.hpftid_end = val[1];
			sc->tids.nhpftids = val[1] - val[0] + 1;

			/*
			 * These should go off if the layout changes and the
			 * driver needs to catch up.
			 */
			MPASS(sc->tids.hpftid_base == 0);
			MPASS(sc->tids.tid_base == sc->tids.nhpftids);
		}

		param[0] = FW_PARAM_PFVF(RAWF_START);
		param[1] = FW_PARAM_PFVF(RAWF_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query rawf parameters: %d.\n", rc);
			return (rc);
		}
		if ((int)val[1] > (int)val[0]) {
			sc->rawf_base = val[0];
			sc->nrawf = val[1] - val[0] + 1;
		}
	}

	/*
	 * MPSBGMAP is queried separately because only recent firmwares support
	 * it as a parameter and we don't want the compound query above to fail
	 * on older firmwares.
	 */
	param[0] = FW_PARAM_DEV(MPSBGMAP);
	val[0] = 0;
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0)
		sc->params.mps_bg_map = val[0];
	else
		sc->params.mps_bg_map = 0;

	/*
	 * Determine whether the firmware supports the filter2 work request.
	 * This is queried separately for the same reason as MPSBGMAP above.
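	 * The same one-parameter probe pattern repeats below for the ULPTX
	 * MEMWRITE DSGL, FW_RI_FR_NSMR_TPTE_WR, 512-entry SGLs, and the
	 * ETH_TX_PKTS_WR packet limit: query a single param and fall back to
	 * a conservative default if the firmware doesn't know it.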
	 */
	param[0] = FW_PARAM_DEV(FILTER2_WR);
	val[0] = 0;
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0)
		sc->params.filter2_wr_support = val[0] != 0;
	else
		sc->params.filter2_wr_support = 0;

	/*
	 * Find out whether we're allowed to use the ULPTX MEMWRITE DSGL.
	 * This is queried separately for the same reason as other params above.
	 */
	param[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
	val[0] = 0;
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0)
		sc->params.ulptx_memwrite_dsgl = val[0] != 0;
	else
		sc->params.ulptx_memwrite_dsgl = false;

	/* FW_RI_FR_NSMR_TPTE_WR support */
	param[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0)
		sc->params.fr_nsmr_tpte_wr_support = val[0] != 0;
	else
		sc->params.fr_nsmr_tpte_wr_support = false;

	/* Support for 512 SGL entries per FR MR. */
	param[0] = FW_PARAM_DEV(DEV_512SGL_MR);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0)
		sc->params.dev_512sgl_mr = val[0] != 0;
	else
		sc->params.dev_512sgl_mr = false;

	param[0] = FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
	if (rc == 0)
		sc->params.max_pkts_per_eth_tx_pkts_wr = val[0];
	else
		sc->params.max_pkts_per_eth_tx_pkts_wr = 15;

	/* get capabilities */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get card capabilities: %d.\n", rc);
		return (rc);
	}

#define READ_CAPS(x) do { \
	sc->x = htobe16(caps.x); \
} while (0)
	READ_CAPS(nbmcaps);
	READ_CAPS(linkcaps);
	READ_CAPS(switchcaps);
	READ_CAPS(niccaps);
	READ_CAPS(toecaps);
	READ_CAPS(rdmacaps);
	READ_CAPS(cryptocaps);
	READ_CAPS(iscsicaps);
	READ_CAPS(fcoecaps);

	if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) {
		MPASS(chip_id(sc) > CHELSIO_T4);
		MPASS(sc->toecaps == 0);
		sc->toecaps = 0;

		param[0] = FW_PARAM_DEV(NTID);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query HASHFILTER parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
			MPASS(sc->tids.ntids >= sc->tids.nhpftids);
			sc->tids.ntids -= sc->tids.nhpftids;
		}
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->params.hash_filter = 1;
	}
	if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
		param[0] = FW_PARAM_PFVF(ETHOFLD_START);
		param[1] = FW_PARAM_PFVF(ETHOFLD_END);
		param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query NIC parameters: %d.\n", rc);
			return (rc);
		}
		if ((int)val[1] > (int)val[0]) {
			sc->tids.etid_base = val[0];
			sc->tids.etid_end = val[1];
			sc->tids.netids = val[1] - val[0] + 1;
			sc->params.eo_wr_cred = val[2];
			sc->params.ethoffload = 1;
		}
	}
	if (sc->toecaps) {
		/* query offload-related parameters */
		param[0] = FW_PARAM_DEV(NTID);
		param[1] = FW_PARAM_PFVF(SERVER_START);
		param[2] = FW_PARAM_PFVF(SERVER_END);
		param[3] = FW_PARAM_PFVF(TDDP_START);
		param[4] = FW_PARAM_PFVF(TDDP_END);
		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query TOE parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) {
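			/*
			 * Firmwares older than 1.20.5.0 report ntids with the
			 * high priority filter region included; carve it back
			 * out here (the same adjustment as in the hashfilter
			 * case above) so that ntids counts ordinary tids only.
			 */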
MPASS(sc->tids.ntids >= sc->tids.nhpftids); sc->tids.ntids -= sc->tids.nhpftids; } sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); if ((int)val[2] > (int)val[1]) { sc->tids.stid_base = val[1]; sc->tids.nstids = val[2] - val[1] + 1; } sc->vres.ddp.start = val[3]; sc->vres.ddp.size = val[4] - val[3] + 1; sc->params.ofldq_wr_cred = val[5]; sc->params.offload = 1; } else { /* * The firmware attempts memfree TOE configuration for -SO cards * and will report toecaps=0 if it runs out of resources (this * depends on the config file). It may not report 0 for other * capabilities dependent on the TOE in this case. Set them to * 0 here so that the driver doesn't bother tracking resources * that will never be used. */ sc->iscsicaps = 0; sc->rdmacaps = 0; } if (sc->rdmacaps) { param[0] = FW_PARAM_PFVF(STAG_START); param[1] = FW_PARAM_PFVF(STAG_END); param[2] = FW_PARAM_PFVF(RQ_START); param[3] = FW_PARAM_PFVF(RQ_END); param[4] = FW_PARAM_PFVF(PBL_START); param[5] = FW_PARAM_PFVF(PBL_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query RDMA parameters(1): %d.\n", rc); return (rc); } sc->vres.stag.start = val[0]; sc->vres.stag.size = val[1] - val[0] + 1; sc->vres.rq.start = val[2]; sc->vres.rq.size = val[3] - val[2] + 1; sc->vres.pbl.start = val[4]; sc->vres.pbl.size = val[5] - val[4] + 1; param[0] = FW_PARAM_PFVF(SQRQ_START); param[1] = FW_PARAM_PFVF(SQRQ_END); param[2] = FW_PARAM_PFVF(CQ_START); param[3] = FW_PARAM_PFVF(CQ_END); param[4] = FW_PARAM_PFVF(OCQ_START); param[5] = FW_PARAM_PFVF(OCQ_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query RDMA parameters(2): %d.\n", rc); return (rc); } sc->vres.qp.start = val[0]; sc->vres.qp.size = val[1] - val[0] + 1; sc->vres.cq.start = val[2]; sc->vres.cq.size = val[3] - val[2] + 1; sc->vres.ocq.start = val[4]; sc->vres.ocq.size = val[5] - val[4] + 1; param[0] = FW_PARAM_PFVF(SRQ_START); param[1] = FW_PARAM_PFVF(SRQ_END); param[2] = FW_PARAM_DEV(MAXORDIRD_QP); param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query RDMA parameters(3): %d.\n", rc); return (rc); } sc->vres.srq.start = val[0]; sc->vres.srq.size = val[1] - val[0] + 1; sc->params.max_ordird_qp = val[2]; sc->params.max_ird_adapter = val[3]; } if (sc->iscsicaps) { param[0] = FW_PARAM_PFVF(ISCSI_START); param[1] = FW_PARAM_PFVF(ISCSI_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query iSCSI parameters: %d.\n", rc); return (rc); } sc->vres.iscsi.start = val[0]; sc->vres.iscsi.size = val[1] - val[0] + 1; } if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) { param[0] = FW_PARAM_PFVF(TLS_START); param[1] = FW_PARAM_PFVF(TLS_END); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); if (rc != 0) { device_printf(sc->dev, "failed to query TLS parameters: %d.\n", rc); return (rc); } sc->vres.key.start = val[0]; sc->vres.key.size = val[1] - val[0] + 1; } /* * We've got the params we wanted to query directly from the firmware. * Grab some others via other means. 
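	 * The SGE and TP settings that follow are largely read back from
	 * hardware registers rather than negotiated through the mailbox.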
	 */
	t4_init_sge_params(sc);
	t4_init_tp_params(sc);
	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
	t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);

	rc = t4_verify_chip_settings(sc);
	if (rc != 0)
		return (rc);
	t4_init_rx_buf_info(sc);

	return (rc);
}

#ifdef KERN_TLS
static void
ktls_tick(void *arg)
{
	struct adapter *sc;
	uint32_t tstamp;

	sc = arg;
	if (sc->flags & KERN_TLS_ON) {
		tstamp = tcp_ts_getticks();
		t4_write_reg(sc, A_TP_SYNC_TIME_HI, tstamp >> 1);
		t4_write_reg(sc, A_TP_SYNC_TIME_LO, tstamp << 31);
	}

	callout_schedule_sbt(&sc->ktls_tick, SBT_1MS, 0, C_HARDCLOCK);
}

static int
t4_config_kern_tls(struct adapter *sc, bool enable)
{
	int rc;
	uint32_t param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_KTLS_HW) |
	    V_FW_PARAMS_PARAM_Y(enable ? 1 : 0) |
	    V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);

	rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &param);
	if (rc != 0) {
		CH_ERR(sc, "failed to %s NIC TLS: %d\n",
		    enable ? "enable" : "disable", rc);
		return (rc);
	}

	if (enable)
		sc->flags |= KERN_TLS_ON;
	else
		sc->flags &= ~KERN_TLS_ON;

	return (rc);
}
#endif

static int
set_params__post_init(struct adapter *sc)
{
	uint32_t mask, param, val;
#ifdef TCP_OFFLOAD
	int i, v, shift;
#endif

	/* ask for encapsulated CPLs */
	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val = 1;
	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);

	/* Enable 32b port caps if the firmware supports it. */
	param = FW_PARAM_PFVF(PORT_CAPS32);
	val = 1;
	if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val) == 0)
		sc->params.port_caps32 = 1;

	/* Let filter + maskhash steer to a part of the VI's RSS region. */
	val = 1 << (G_MASKSIZE(t4_read_reg(sc, A_TP_RSS_CONFIG_TNL)) - 1);
	t4_set_reg_field(sc, A_TP_RSS_CONFIG_TNL, V_MASKFILTER(M_MASKFILTER),
	    V_MASKFILTER(val - 1));

	mask = F_DROPERRORANY | F_DROPERRORMAC | F_DROPERRORIPVER |
	    F_DROPERRORFRAG | F_DROPERRORATTACK | F_DROPERRORETHHDRLEN |
	    F_DROPERRORIPHDRLEN | F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN |
	    F_DROPERRORTCPOPT | F_DROPERRORCSUMIP | F_DROPERRORCSUM;
	val = 0;
	if (chip_id(sc) < CHELSIO_T6 && t4_attack_filter != 0) {
		t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_ATTACKFILTERENABLE,
		    F_ATTACKFILTERENABLE);
		val |= F_DROPERRORATTACK;
	}
	if (t4_drop_ip_fragments != 0) {
		t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_FRAGMENTDROP,
		    F_FRAGMENTDROP);
		val |= F_DROPERRORFRAG;
	}
	if (t4_drop_pkts_with_l2_errors != 0)
		val |= F_DROPERRORMAC | F_DROPERRORETHHDRLEN;
	if (t4_drop_pkts_with_l3_errors != 0) {
		val |= F_DROPERRORIPVER | F_DROPERRORIPHDRLEN |
		    F_DROPERRORCSUMIP;
	}
	if (t4_drop_pkts_with_l4_errors != 0) {
		val |= F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN |
		    F_DROPERRORTCPOPT | F_DROPERRORCSUM;
	}
	t4_set_reg_field(sc, A_TP_ERR_CONFIG, mask, val);

#ifdef TCP_OFFLOAD
	/*
	 * Override the TOE timers with user provided tunables.  This is not
	 * the recommended way to change the timers (the firmware config file
	 * is) so these tunables are not documented.
	 *
	 * All the timer tunables are in microseconds.
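	 *
	 * For example (values are illustrative only), a 75 second keepalive
	 * idle time could be requested with a loader tunable along the lines
	 * of hw.cxgbe.toe.keepalive_idle="75000000"; the value lands in
	 * t4_toe_keepalive_idle and is converted to TP clock ticks below.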
*/ if (t4_toe_keepalive_idle != 0) { v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle); v &= M_KEEPALIVEIDLE; t4_set_reg_field(sc, A_TP_KEEP_IDLE, V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v)); } if (t4_toe_keepalive_interval != 0) { v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval); v &= M_KEEPALIVEINTVL; t4_set_reg_field(sc, A_TP_KEEP_INTVL, V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v)); } if (t4_toe_keepalive_count != 0) { v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2; t4_set_reg_field(sc, A_TP_SHIFT_CNT, V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) | V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2), V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v)); } if (t4_toe_rexmt_min != 0) { v = us_to_tcp_ticks(sc, t4_toe_rexmt_min); v &= M_RXTMIN; t4_set_reg_field(sc, A_TP_RXT_MIN, V_RXTMIN(M_RXTMIN), V_RXTMIN(v)); } if (t4_toe_rexmt_max != 0) { v = us_to_tcp_ticks(sc, t4_toe_rexmt_max); v &= M_RXTMAX; t4_set_reg_field(sc, A_TP_RXT_MAX, V_RXTMAX(M_RXTMAX), V_RXTMAX(v)); } if (t4_toe_rexmt_count != 0) { v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2; t4_set_reg_field(sc, A_TP_SHIFT_CNT, V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) | V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2), V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v)); } for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) { if (t4_toe_rexmt_backoff[i] != -1) { v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0; shift = (i & 3) << 3; t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3), M_TIMERBACKOFFINDEX0 << shift, v << shift); } } #endif #ifdef KERN_TLS if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS && sc->toecaps & FW_CAPS_CONFIG_TOE) { /* * Limit TOE connections to 2 reassembly "islands". This is * required for TOE TLS connections to downgrade to plain TOE * connections if an unsupported TLS version or ciphersuite is * used. */ t4_tp_wr_bits_indirect(sc, A_TP_FRAG_CONFIG, V_PASSMODE(M_PASSMODE), V_PASSMODE(2)); if (is_ktls(sc)) { sc->tlst.inline_keys = t4_tls_inline_keys; sc->tlst.combo_wrs = t4_tls_combo_wrs; if (t4_kern_tls != 0) t4_config_kern_tls(sc, true); } } #endif return (0); } #undef FW_PARAM_PFVF #undef FW_PARAM_DEV static void t4_set_desc(struct adapter *sc) { char buf[128]; struct adapter_params *p = &sc->params; snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id); device_set_desc_copy(sc->dev, buf); } static inline void ifmedia_add4(struct ifmedia *ifm, int m) { ifmedia_add(ifm, m, 0, NULL); ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL); ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL); ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL); } /* * This is the selected media, which is not quite the same as the active media. * The media line in ifconfig is "media: Ethernet selected (active)" if selected * and active are not the same, and "media: Ethernet selected" otherwise. */ static void set_current_media(struct port_info *pi) { struct link_config *lc; struct ifmedia *ifm; int mword; u_int speed; PORT_LOCK_ASSERT_OWNED(pi); /* Leave current media alone if it's already set to IFM_NONE. 
*/ ifm = &pi->media; if (ifm->ifm_cur != NULL && IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE) return; lc = &pi->link_cfg; if (lc->requested_aneg != AUTONEG_DISABLE && lc->pcaps & FW_PORT_CAP32_ANEG) { ifmedia_set(ifm, IFM_ETHER | IFM_AUTO); return; } mword = IFM_ETHER | IFM_FDX; if (lc->requested_fc & PAUSE_TX) mword |= IFM_ETH_TXPAUSE; if (lc->requested_fc & PAUSE_RX) mword |= IFM_ETH_RXPAUSE; if (lc->requested_speed == 0) speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */ else speed = lc->requested_speed; mword |= port_mword(pi, speed_to_fwcap(speed)); ifmedia_set(ifm, mword); } /* * Returns true if the ifmedia list for the port cannot change. */ static bool fixed_ifmedia(struct port_info *pi) { return (pi->port_type == FW_PORT_TYPE_BT_SGMII || pi->port_type == FW_PORT_TYPE_BT_XFI || pi->port_type == FW_PORT_TYPE_BT_XAUI || pi->port_type == FW_PORT_TYPE_KX4 || pi->port_type == FW_PORT_TYPE_KX || pi->port_type == FW_PORT_TYPE_KR || pi->port_type == FW_PORT_TYPE_BP_AP || pi->port_type == FW_PORT_TYPE_BP4_AP || pi->port_type == FW_PORT_TYPE_BP40_BA || pi->port_type == FW_PORT_TYPE_KR4_100G || pi->port_type == FW_PORT_TYPE_KR_SFP28 || pi->port_type == FW_PORT_TYPE_KR_XLAUI); } static void build_medialist(struct port_info *pi) { uint32_t ss, speed; int unknown, mword, bit; struct link_config *lc; struct ifmedia *ifm; PORT_LOCK_ASSERT_OWNED(pi); if (pi->flags & FIXED_IFMEDIA) return; /* * Rebuild the ifmedia list. */ ifm = &pi->media; ifmedia_removeall(ifm); lc = &pi->link_cfg; ss = G_FW_PORT_CAP32_SPEED(lc->pcaps); /* Supported Speeds */ if (__predict_false(ss == 0)) { /* not supposed to happen. */ MPASS(ss != 0); no_media: MPASS(LIST_EMPTY(&ifm->ifm_list)); ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL); ifmedia_set(ifm, IFM_ETHER | IFM_NONE); return; } unknown = 0; for (bit = S_FW_PORT_CAP32_SPEED; bit < fls(ss); bit++) { speed = 1 << bit; MPASS(speed & M_FW_PORT_CAP32_SPEED); if (ss & speed) { mword = port_mword(pi, speed); if (mword == IFM_NONE) { goto no_media; } else if (mword == IFM_UNKNOWN) unknown++; else ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword); } } if (unknown > 0) /* Add one unknown for all unknown media types. */ ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN); if (lc->pcaps & FW_PORT_CAP32_ANEG) ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL); set_current_media(pi); } /* * Initialize the requested fields in the link config based on driver tunables. */ static void init_link_config(struct port_info *pi) { struct link_config *lc = &pi->link_cfg; PORT_LOCK_ASSERT_OWNED(pi); lc->requested_speed = 0; if (t4_autoneg == 0) lc->requested_aneg = AUTONEG_DISABLE; else if (t4_autoneg == 1) lc->requested_aneg = AUTONEG_ENABLE; else lc->requested_aneg = AUTONEG_AUTO; lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG); if (t4_fec & FEC_AUTO) lc->requested_fec = FEC_AUTO; else if (t4_fec == 0) lc->requested_fec = FEC_NONE; else { /* -1 is handled by the FEC_AUTO block above and not here. */ lc->requested_fec = t4_fec & (FEC_RS | FEC_BASER_RS | FEC_NONE | FEC_MODULE); if (lc->requested_fec == 0) lc->requested_fec = FEC_AUTO; } } /* * Makes sure that all requested settings comply with what's supported by the * port. Returns the number of settings that were invalid and had to be fixed. 
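 *
 * For example, a requested speed with no matching bit in the port's
 * capability mask is reset to 0 (autoselect), and a FEC request the
 * transceiver cannot honor falls back to FEC_AUTO; each such correction
 * increments the count that is returned.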
*/ static int fixup_link_config(struct port_info *pi) { int n = 0; struct link_config *lc = &pi->link_cfg; uint32_t fwspeed; PORT_LOCK_ASSERT_OWNED(pi); /* Speed (when not autonegotiating) */ if (lc->requested_speed != 0) { fwspeed = speed_to_fwcap(lc->requested_speed); if ((fwspeed & lc->pcaps) == 0) { n++; lc->requested_speed = 0; } } /* Link autonegotiation */ MPASS(lc->requested_aneg == AUTONEG_ENABLE || lc->requested_aneg == AUTONEG_DISABLE || lc->requested_aneg == AUTONEG_AUTO); if (lc->requested_aneg == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) { n++; lc->requested_aneg = AUTONEG_AUTO; } /* Flow control */ MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0); if (lc->requested_fc & PAUSE_TX && !(lc->pcaps & FW_PORT_CAP32_FC_TX)) { n++; lc->requested_fc &= ~PAUSE_TX; } if (lc->requested_fc & PAUSE_RX && !(lc->pcaps & FW_PORT_CAP32_FC_RX)) { n++; lc->requested_fc &= ~PAUSE_RX; } if (!(lc->requested_fc & PAUSE_AUTONEG) && !(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)) { n++; lc->requested_fc |= PAUSE_AUTONEG; } /* FEC */ if ((lc->requested_fec & FEC_RS && !(lc->pcaps & FW_PORT_CAP32_FEC_RS)) || (lc->requested_fec & FEC_BASER_RS && !(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS))) { n++; lc->requested_fec = FEC_AUTO; } return (n); } /* * Apply the requested L1 settings, which are expected to be valid, to the * hardware. */ static int apply_link_config(struct port_info *pi) { struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; #ifdef INVARIANTS ASSERT_SYNCHRONIZED_OP(sc); PORT_LOCK_ASSERT_OWNED(pi); if (lc->requested_aneg == AUTONEG_ENABLE) MPASS(lc->pcaps & FW_PORT_CAP32_ANEG); if (!(lc->requested_fc & PAUSE_AUTONEG)) MPASS(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE); if (lc->requested_fc & PAUSE_TX) MPASS(lc->pcaps & FW_PORT_CAP32_FC_TX); if (lc->requested_fc & PAUSE_RX) MPASS(lc->pcaps & FW_PORT_CAP32_FC_RX); if (lc->requested_fec & FEC_RS) MPASS(lc->pcaps & FW_PORT_CAP32_FEC_RS); if (lc->requested_fec & FEC_BASER_RS) MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS); #endif rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); if (rc != 0) { /* Don't complain if the VF driver gets back an EPERM. */ if (!(sc->flags & IS_VF) || rc != FW_EPERM) device_printf(pi->dev, "l1cfg failed: %d\n", rc); } else { /* * An L1_CFG will almost always result in a link-change event if * the link is up, and the driver will refresh the actual * fec/fc/etc. when the notification is processed. If the link * is down then the actual settings are meaningless. * * This takes care of the case where a change in the L1 settings * may not result in a notification. 
*/ if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG)) lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX); } return (rc); } #define FW_MAC_EXACT_CHUNK 7 struct mcaddr_ctx { struct ifnet *ifp; const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; uint64_t hash; int i; int del; int rc; }; static u_int add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct mcaddr_ctx *ctx = arg; struct vi_info *vi = ctx->ifp->if_softc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; if (ctx->rc < 0) return (0); ctx->mcaddr[ctx->i] = LLADDR(sdl); MPASS(ETHER_IS_MULTICAST(ctx->mcaddr[ctx->i])); ctx->i++; if (ctx->i == FW_MAC_EXACT_CHUNK) { ctx->rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, ctx->del, ctx->i, ctx->mcaddr, NULL, &ctx->hash, 0); if (ctx->rc < 0) { int j; for (j = 0; j < ctx->i; j++) { if_printf(ctx->ifp, "failed to add mc address" " %02x:%02x:%02x:" "%02x:%02x:%02x rc=%d\n", ctx->mcaddr[j][0], ctx->mcaddr[j][1], ctx->mcaddr[j][2], ctx->mcaddr[j][3], ctx->mcaddr[j][4], ctx->mcaddr[j][5], -ctx->rc); } return (0); } ctx->del = 0; ctx->i = 0; } return (1); } /* * Program the port's XGMAC based on parameters in ifnet. The caller also * indicates which parameters should be programmed (the rest are left alone). */ int update_mac_settings(struct ifnet *ifp, int flags) { int rc = 0; struct vi_info *vi = ifp->if_softc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; uint8_t match_all_mac[ETHER_ADDR_LEN] = {0}; ASSERT_SYNCHRONIZED_OP(sc); KASSERT(flags, ("%s: not told what to update.", __func__)); if (flags & XGMAC_MTU) mtu = ifp->if_mtu; if (flags & XGMAC_PROMISC) promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; if (flags & XGMAC_ALLMULTI) allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; if (flags & XGMAC_VLANEX) vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, allmulti, 1, vlanex, false); if (rc) { if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc); return (rc); } } if (flags & XGMAC_UCADDR) { uint8_t ucaddr[ETHER_ADDR_LEN]; bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, ucaddr, true, &vi->smt_idx); if (rc < 0) { rc = -rc; if_printf(ifp, "change_mac failed: %d\n", rc); return (rc); } else { vi->xact_addr_filt = rc; rc = 0; } } if (flags & XGMAC_MCADDRS) { struct epoch_tracker et; struct mcaddr_ctx ctx; int j; ctx.ifp = ifp; ctx.hash = 0; ctx.i = 0; ctx.del = 1; ctx.rc = 0; /* * Unlike other drivers, we accumulate list of pointers into * interface address lists and we need to keep it safe even * after if_foreach_llmaddr() returns, thus we must enter the * network epoch. 
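		 * Addresses are batched FW_MAC_EXACT_CHUNK (7) at a time:
		 * add_maddr() flushes each full chunk to t4_alloc_mac_filt()
		 * during the walk, and any remainder is flushed below once
		 * the walk is complete.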
		 */
		NET_EPOCH_ENTER(et);
		if_foreach_llmaddr(ifp, add_maddr, &ctx);
		if (ctx.rc < 0) {
			NET_EPOCH_EXIT(et);
			rc = -ctx.rc;
			return (rc);
		}
		if (ctx.i > 0) {
			rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
			    ctx.del, ctx.i, ctx.mcaddr, NULL, &ctx.hash, 0);
			NET_EPOCH_EXIT(et);
			if (rc < 0) {
				rc = -rc;
				for (j = 0; j < ctx.i; j++) {
					if_printf(ifp,
					    "failed to add mcast address"
					    " %02x:%02x:%02x:"
					    "%02x:%02x:%02x rc=%d\n",
					    ctx.mcaddr[j][0], ctx.mcaddr[j][1],
					    ctx.mcaddr[j][2], ctx.mcaddr[j][3],
					    ctx.mcaddr[j][4], ctx.mcaddr[j][5],
					    rc);
				}
				return (rc);
			}
			ctx.del = 0;
		} else
			NET_EPOCH_EXIT(et);

		rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, ctx.hash, 0);
		if (rc != 0)
			if_printf(ifp, "failed to set mcast address hash: %d\n",
			    rc);
		if (ctx.del == 0) {
			/* We clobbered the VXLAN entry if there was one. */
			pi->vxlan_tcam_entry = false;
		}
	}

	if (IS_MAIN_VI(vi) && sc->vxlan_refcount > 0 &&
	    pi->vxlan_tcam_entry == false) {
		rc = t4_alloc_raw_mac_filt(sc, vi->viid, match_all_mac,
		    match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id,
		    true);
		if (rc < 0) {
			rc = -rc;
			if_printf(ifp, "failed to add VXLAN TCAM entry: %d.\n",
			    rc);
		} else {
			MPASS(rc == sc->rawf_base + pi->port_id);
			rc = 0;
			pi->vxlan_tcam_entry = true;
		}
	}

	return (rc);
}

/*
 * {begin|end}_synchronized_op must be called from the same thread.
 */
int
begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
    char *wmesg)
{
	int rc, pri;

#ifdef WITNESS
	/* the caller thinks it's ok to sleep, but is it really? */
	if (flags & SLEEP_OK)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "begin_synchronized_op");
#endif

	if (flags & INTR_OK)
		pri = PCATCH;
	else
		pri = 0;

	ADAPTER_LOCK(sc);
	for (;;) {

		if (vi && IS_DOOMED(vi)) {
			rc = ENXIO;
			goto done;
		}

		if (!IS_BUSY(sc)) {
			rc = 0;
			break;
		}

		if (!(flags & SLEEP_OK)) {
			rc = EBUSY;
			goto done;
		}

		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
			rc = EINTR;
			goto done;
		}
	}

	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = wmesg;
	sc->last_op_thr = curthread;
	sc->last_op_flags = flags;
#endif

done:
	if (!(flags & HOLD_LOCK) || rc)
		ADAPTER_UNLOCK(sc);

	return (rc);
}

/*
 * Tell if_ioctl and if_init that the VI is going away.  This is a special
 * variant of begin_synchronized_op and must be paired with a call to
 * end_synchronized_op.
 */
void
doom_vi(struct adapter *sc, struct vi_info *vi)
{

	ADAPTER_LOCK(sc);
	SET_DOOMED(vi);
	wakeup(&sc->flags);
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
	sc->last_op_flags = 0;
#endif
	ADAPTER_UNLOCK(sc);
}

/*
 * {begin|end}_synchronized_op must be called from the same thread.
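 *
 * Typical usage (a sketch; the wmesg string varies by caller):
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xyz");
 *	if (rc != 0)
 *		return (rc);
 *	... operate on the adapter or VI ...
 *	end_synchronized_op(sc, 0);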
*/ void end_synchronized_op(struct adapter *sc, int flags) { if (flags & LOCK_HELD) ADAPTER_LOCK_ASSERT_OWNED(sc); else ADAPTER_LOCK(sc); KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); CLR_BUSY(sc); wakeup(&sc->flags); ADAPTER_UNLOCK(sc); } static int cxgbe_init_synchronized(struct vi_info *vi) { struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct ifnet *ifp = vi->ifp; int rc = 0, i; struct sge_txq *txq; ASSERT_SYNCHRONIZED_OP(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) return (0); /* already running */ if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0)) return (rc); /* error message displayed already */ if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0)) return (rc); /* error message displayed already */ rc = update_mac_settings(ifp, XGMAC_ALL); if (rc) goto done; /* error message displayed already */ PORT_LOCK(pi); if (pi->up_vis == 0) { t4_update_port_info(pi); fixup_link_config(pi); build_medialist(pi); apply_link_config(pi); } rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); if (rc != 0) { if_printf(ifp, "enable_vi failed: %d\n", rc); PORT_UNLOCK(pi); goto done; } /* * Can't fail from this point onwards. Review cxgbe_uninit_synchronized * if this changes. */ for_each_txq(vi, i, txq) { TXQ_LOCK(txq); txq->eq.flags |= EQ_ENABLED; TXQ_UNLOCK(txq); } /* * The first iq of the first port to come up is used for tracing. */ if (sc->traceq < 0 && IS_MAIN_VI(vi)) { sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL : A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | V_QUEUENUMBER(sc->traceq)); pi->flags |= HAS_TRACEQ; } /* all ok */ pi->up_vis++; ifp->if_drv_flags |= IFF_DRV_RUNNING; if (pi->link_cfg.link_ok) t4_os_link_changed(pi); PORT_UNLOCK(pi); mtx_lock(&vi->tick_mtx); if (ifp->if_get_counter == vi_get_counter) callout_reset(&vi->tick, hz, vi_tick, vi); else callout_reset(&vi->tick, hz, cxgbe_tick, vi); mtx_unlock(&vi->tick_mtx); done: if (rc != 0) cxgbe_uninit_synchronized(vi); return (rc); } /* * Idempotent. */ static int cxgbe_uninit_synchronized(struct vi_info *vi) { struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; struct ifnet *ifp = vi->ifp; int rc, i; struct sge_txq *txq; ASSERT_SYNCHRONIZED_OP(sc); if (!(vi->flags & VI_INIT_DONE)) { if (__predict_false(ifp->if_drv_flags & IFF_DRV_RUNNING)) { KASSERT(0, ("uninited VI is running")); if_printf(ifp, "uninited VI with running ifnet. " "vi->flags 0x%016lx, if_flags 0x%08x, " "if_drv_flags 0x%08x\n", vi->flags, ifp->if_flags, ifp->if_drv_flags); } return (0); } /* * Disable the VI so that all its data in either direction is discarded * by the MPS. Leave everything else (the queues, interrupts, and 1Hz * tick) intact as the TP can deliver negative advice or data that it's * holding in its RAM (for an offloaded connection) even after the VI is * disabled. 
*/ rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); if (rc) { if_printf(ifp, "disable_vi failed: %d\n", rc); return (rc); } for_each_txq(vi, i, txq) { TXQ_LOCK(txq); txq->eq.flags &= ~EQ_ENABLED; TXQ_UNLOCK(txq); } mtx_lock(&vi->tick_mtx); callout_stop(&vi->tick); mtx_unlock(&vi->tick_mtx); PORT_LOCK(pi); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { PORT_UNLOCK(pi); return (0); } ifp->if_drv_flags &= ~IFF_DRV_RUNNING; pi->up_vis--; if (pi->up_vis > 0) { PORT_UNLOCK(pi); return (0); } pi->link_cfg.link_ok = false; pi->link_cfg.speed = 0; pi->link_cfg.link_down_rc = 255; t4_os_link_changed(pi); PORT_UNLOCK(pi); return (0); } /* * It is ok for this function to fail midway and return right away. t4_detach * will walk the entire sc->irq list and clean up whatever is valid. */ int t4_setup_intr_handlers(struct adapter *sc) { int rc, rid, p, q, v; char s[8]; struct irq *irq; struct port_info *pi; struct vi_info *vi; struct sge *sge = &sc->sge; struct sge_rxq *rxq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif #ifdef DEV_NETMAP struct sge_nm_rxq *nm_rxq; #endif #ifdef RSS int nbuckets = rss_getnumbuckets(); #endif /* * Setup interrupts. */ irq = &sc->irq[0]; rid = sc->intr_type == INTR_INTX ? 0 : 1; if (forwarding_intr_to_fwq(sc)) return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); /* Multiple interrupts. */ if (sc->flags & IS_VF) KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, ("%s: too few intr.", __func__)); else KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, ("%s: too few intr.", __func__)); /* The first one is always error intr on PFs */ if (!(sc->flags & IS_VF)) { rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); if (rc != 0) return (rc); irq++; rid++; } /* The second one is always the firmware event queue (first on VFs) */ rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); if (rc != 0) return (rc); irq++; rid++; for_each_port(sc, p) { pi = sc->port[p]; for_each_vi(pi, v, vi) { vi->first_intr = rid - 1; if (vi->nnmrxq > 0) { int n = max(vi->nrxq, vi->nnmrxq); rxq = &sge->rxq[vi->first_rxq]; #ifdef DEV_NETMAP nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; #endif for (q = 0; q < n; q++) { snprintf(s, sizeof(s), "%x%c%x", p, 'a' + v, q); if (q < vi->nrxq) irq->rxq = rxq++; #ifdef DEV_NETMAP if (q < vi->nnmrxq) irq->nm_rxq = nm_rxq++; if (irq->nm_rxq != NULL && irq->rxq == NULL) { /* Netmap rx only */ rc = t4_alloc_irq(sc, irq, rid, t4_nm_intr, irq->nm_rxq, s); } if (irq->nm_rxq != NULL && irq->rxq != NULL) { /* NIC and Netmap rx */ rc = t4_alloc_irq(sc, irq, rid, t4_vi_intr, irq, s); } #endif if (irq->rxq != NULL && irq->nm_rxq == NULL) { /* NIC rx only */ rc = t4_alloc_irq(sc, irq, rid, t4_intr, irq->rxq, s); } if (rc != 0) return (rc); #ifdef RSS if (q < vi->nrxq) { bus_bind_intr(sc->dev, irq->res, rss_getcpu(q % nbuckets)); } #endif irq++; rid++; vi->nintr++; } } else { for_each_rxq(vi, q, rxq) { snprintf(s, sizeof(s), "%x%c%x", p, 'a' + v, q); rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq, s); if (rc != 0) return (rc); #ifdef RSS bus_bind_intr(sc->dev, irq->res, rss_getcpu(q % nbuckets)); #endif irq++; rid++; vi->nintr++; } } #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, q, ofld_rxq) { snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q); rc = t4_alloc_irq(sc, irq, rid, t4_intr, ofld_rxq, s); if (rc != 0) return (rc); irq++; rid++; vi->nintr++; } #endif } } MPASS(irq == &sc->irq[sc->intr_count]); return (0); } static void write_global_rss_key(struct adapter *sc) { #ifdef RSS int i; uint32_t raw_rss_key[RSS_KEYSIZE / 
sizeof(uint32_t)]; uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; CTASSERT(RSS_KEYSIZE == 40); rss_getkey((void *)&raw_rss_key[0]); for (i = 0; i < nitems(rss_key); i++) { rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); } t4_write_rss_key(sc, &rss_key[0], -1, 1); #endif } /* * Idempotent. */ static int adapter_full_init(struct adapter *sc) { int rc, i; ASSERT_SYNCHRONIZED_OP(sc); if (!(sc->flags & ADAP_SYSCTL_CTX)) { sysctl_ctx_init(&sc->ctx); sc->flags |= ADAP_SYSCTL_CTX; } /* * queues that belong to the adapter (not any particular port). */ rc = t4_setup_adapter_queues(sc); if (rc != 0) return (rc); for (i = 0; i < nitems(sc->tq); i++) { if (sc->tq[i] != NULL) continue; sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->tq[i]); if (sc->tq[i] == NULL) { CH_ERR(sc, "failed to allocate task queue %d\n", i); return (ENOMEM); } taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", device_get_nameunit(sc->dev), i); } if (!(sc->flags & IS_VF)) { write_global_rss_key(sc); t4_intr_enable(sc); } #ifdef KERN_TLS if (is_ktls(sc)) callout_reset_sbt(&sc->ktls_tick, SBT_1MS, 0, ktls_tick, sc, C_HARDCLOCK); #endif return (0); } int adapter_init(struct adapter *sc) { int rc; ASSERT_SYNCHRONIZED_OP(sc); ADAPTER_LOCK_ASSERT_NOTOWNED(sc); KASSERT((sc->flags & FULL_INIT_DONE) == 0, ("%s: FULL_INIT_DONE already", __func__)); rc = adapter_full_init(sc); if (rc != 0) adapter_full_uninit(sc); else sc->flags |= FULL_INIT_DONE; return (rc); } /* * Idempotent. */ static void adapter_full_uninit(struct adapter *sc) { int i; /* Do this before freeing the adapter queues. */ if (sc->flags & ADAP_SYSCTL_CTX) { sysctl_ctx_free(&sc->ctx); sc->flags &= ~ADAP_SYSCTL_CTX; } t4_teardown_adapter_queues(sc); for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { taskqueue_free(sc->tq[i]); sc->tq[i] = NULL; } sc->flags &= ~FULL_INIT_DONE; } #ifdef RSS #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ RSS_HASHTYPE_RSS_UDP_IPV6) /* Translates kernel hash types to hardware. */ static int hashconfig_to_hashen(int hashconfig) { int hashen = 0; if (hashconfig & RSS_HASHTYPE_RSS_IPV4) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; if (hashconfig & RSS_HASHTYPE_RSS_IPV6) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; } if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; } if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; return (hashen); } /* Translates hardware hash types to kernel. */ static int hashen_to_hashconfig(int hashen) { int hashconfig = 0; if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { /* * If UDP hashing was enabled it must have been enabled for * either IPv4 or IPv6 (inclusive or). Enabling UDP without * enabling any 4-tuple hash is nonsense configuration. 
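		 * That is because the hardware has a single UDPEN bit that is
		 * qualified by the per-family 4-tuple enables;
		 * hashconfig_to_hashen() above only ever sets UDPEN together
		 * with IP4FOURTUPEN and/or IP6FOURTUPEN.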
		 */
		MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));

		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
	}
	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
		hashconfig |= RSS_HASHTYPE_RSS_IPV4;
	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
		hashconfig |= RSS_HASHTYPE_RSS_IPV6;

	return (hashconfig);
}
#endif

/*
 * Idempotent.
 */
static int
vi_full_init(struct vi_info *vi)
{
	struct adapter *sc = vi->adapter;
	struct sge_rxq *rxq;
	int rc, i, j;
#ifdef RSS
	int nbuckets = rss_getnumbuckets();
	int hashconfig = rss_gethashconfig();
	int extra;
#endif

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!(vi->flags & VI_SYSCTL_CTX)) {
		sysctl_ctx_init(&vi->ctx);
		vi->flags |= VI_SYSCTL_CTX;
	}

	/*
	 * Allocate tx/rx/fl queues for this VI.
	 */
	rc = t4_setup_vi_queues(vi);
	if (rc != 0)
		return (rc);

	/*
	 * Setup RSS for this VI.  Save a copy of the RSS table for later use.
	 */
	if (vi->nrxq > vi->rss_size) {
		CH_ALERT(vi, "nrxq (%d) > hw RSS table size (%d); "
		    "some queues will never receive traffic.\n", vi->nrxq,
		    vi->rss_size);
	} else if (vi->rss_size % vi->nrxq) {
		CH_ALERT(vi, "nrxq (%d), hw RSS table size (%d); "
		    "expect uneven traffic distribution.\n", vi->nrxq,
		    vi->rss_size);
	}
#ifdef RSS
	if (vi->nrxq != nbuckets) {
		CH_ALERT(vi, "nrxq (%d) != kernel RSS buckets (%d); "
		    "performance will be impacted.\n", vi->nrxq, nbuckets);
	}
#endif
	if (vi->rss == NULL)
		vi->rss = malloc(vi->rss_size * sizeof (*vi->rss), M_CXGBE,
		    M_ZERO | M_WAITOK);
	for (i = 0; i < vi->rss_size;) {
#ifdef RSS
		j = rss_get_indirection_to_bucket(i);
		j %= vi->nrxq;
		rxq = &sc->sge.rxq[vi->first_rxq + j];
		vi->rss[i++] = rxq->iq.abs_id;
#else
		for_each_rxq(vi, j, rxq) {
			vi->rss[i++] = rxq->iq.abs_id;
			if (i == vi->rss_size)
				break;
		}
#endif
	}

	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
	    vi->rss, vi->rss_size);
	if (rc != 0) {
		CH_ERR(vi, "rss_config failed: %d\n", rc);
		return (rc);
	}
#ifdef RSS
	vi->hashen = hashconfig_to_hashen(hashconfig);

	/*
	 * We may have had to enable some hashes even though the global config
	 * wants them disabled.  This is a potential problem that must be
	 * reported to the user.
	 */
	extra = hashen_to_hashconfig(vi->hashen) ^ hashconfig;

	/*
	 * If we consider only the supported hash types, then the enabled
	 * hashes are a superset of the requested hashes.  In other words,
	 * there cannot be any supported hash that was requested but not
	 * enabled, but there can be hashes that were not requested but had
	 * to be enabled.
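	 *
	 * Example: requesting only UDP/IPv4 turns on the IPv4 4-tuple hash,
	 * which hashes TCP/IPv4 as well; TCP/IPv4 then shows up in "extra"
	 * and is reported as forced on below.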
*/ extra &= SUPPORTED_RSS_HASHTYPES; MPASS((extra & hashconfig) == 0); if (extra) { CH_ALERT(vi, "global RSS config (0x%x) cannot be accommodated.\n", hashconfig); } if (extra & RSS_HASHTYPE_RSS_IPV4) CH_ALERT(vi, "IPv4 2-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) CH_ALERT(vi, "TCP/IPv4 4-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_IPV6) CH_ALERT(vi, "IPv6 2-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) CH_ALERT(vi, "TCP/IPv6 4-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) CH_ALERT(vi, "UDP/IPv4 4-tuple hashing forced on.\n"); if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) CH_ALERT(vi, "UDP/IPv6 4-tuple hashing forced on.\n"); #else vi->hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; #endif rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, vi->rss[0], 0, 0); if (rc != 0) { CH_ERR(vi, "rss hash/defaultq config failed: %d\n", rc); return (rc); } return (0); } int vi_init(struct vi_info *vi) { int rc; ASSERT_SYNCHRONIZED_OP(vi->adapter); KASSERT((vi->flags & VI_INIT_DONE) == 0, ("%s: VI_INIT_DONE already", __func__)); rc = vi_full_init(vi); if (rc != 0) vi_full_uninit(vi); else vi->flags |= VI_INIT_DONE; return (rc); } /* * Idempotent. */ static void vi_full_uninit(struct vi_info *vi) { if (vi->flags & VI_INIT_DONE) { quiesce_vi(vi); free(vi->rss, M_CXGBE); free(vi->nm_rss, M_CXGBE); } /* Do this before freeing the VI queues. */ if (vi->flags & VI_SYSCTL_CTX) { sysctl_ctx_free(&vi->ctx); vi->flags &= ~VI_SYSCTL_CTX; } t4_teardown_vi_queues(vi); vi->flags &= ~VI_INIT_DONE; } static void quiesce_txq(struct sge_txq *txq) { struct sge_eq *eq = &txq->eq; struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; MPASS(eq->flags & EQ_SW_ALLOCATED); MPASS(!(eq->flags & EQ_ENABLED)); /* Wait for the mp_ring to empty. */ while (!mp_ring_is_idle(txq->r)) { mp_ring_check_drainage(txq->r, 4096); pause("rquiesce", 1); } MPASS(txq->txp.npkt == 0); if (eq->flags & EQ_HW_ALLOCATED) { /* * Hardware is alive and working normally. Wait for it to * finish and then wait for the driver to catch up and reclaim * all descriptors. */ while (spg->cidx != htobe16(eq->pidx)) pause("equiesce", 1); while (eq->cidx != eq->pidx) pause("dquiesce", 1); } else { /* * Hardware is unavailable. Discard all pending tx and reclaim * descriptors directly. */ TXQ_LOCK(txq); while (eq->cidx != eq->pidx) { struct mbuf *m, *nextpkt; struct tx_sdesc *txsd; txsd = &txq->sdesc[eq->cidx]; for (m = txsd->m; m != NULL; m = nextpkt) { nextpkt = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); } IDXINCR(eq->cidx, txsd->desc_used, eq->sidx); } spg->pidx = spg->cidx = htobe16(eq->cidx); TXQ_UNLOCK(txq); } } static void quiesce_wrq(struct sge_wrq *wrq) { /* XXXTX */ } static void quiesce_iq_fl(struct adapter *sc, struct sge_iq *iq, struct sge_fl *fl) { /* Synchronize with the interrupt handler */ while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) pause("iqfree", 1); if (fl != NULL) { MPASS(iq->flags & IQ_HAS_FL); mtx_lock(&sc->sfl_lock); FL_LOCK(fl); fl->flags |= FL_DOOMED; FL_UNLOCK(fl); callout_stop(&sc->sfl_callout); mtx_unlock(&sc->sfl_lock); KASSERT((fl->flags & FL_STARVING) == 0, ("%s: still starving", __func__)); /* Release all buffers if hardware is no longer available. */ if (!(iq->flags & IQ_HW_ALLOCATED)) free_fl_buffers(sc, fl); } } /* * Wait for all activity on all the queues of the VI to complete. 
It is assumed * that no new work is being enqueued by the hardware or the driver. That part * should be arranged before calling this function. */ static void quiesce_vi(struct vi_info *vi) { int i; struct adapter *sc = vi->adapter; struct sge_rxq *rxq; struct sge_txq *txq; #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) struct sge_ofld_txq *ofld_txq; #endif if (!(vi->flags & VI_INIT_DONE)) return; for_each_txq(vi, i, txq) { quiesce_txq(txq); } #if defined(TCP_OFFLOAD) || defined(RATELIMIT) for_each_ofld_txq(vi, i, ofld_txq) { quiesce_wrq(&ofld_txq->wrq); } #endif for_each_rxq(vi, i, rxq) { quiesce_iq_fl(sc, &rxq->iq, &rxq->fl); } #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, i, ofld_rxq) { quiesce_iq_fl(sc, &ofld_rxq->iq, &ofld_rxq->fl); } #endif } static int t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, driver_intr_t *handler, void *arg, char *name) { int rc; irq->rid = rid; irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, RF_SHAREABLE | RF_ACTIVE); if (irq->res == NULL) { device_printf(sc->dev, "failed to allocate IRQ for rid %d, name %s.\n", rid, name); return (ENOMEM); } rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, NULL, handler, arg, &irq->tag); if (rc != 0) { device_printf(sc->dev, "failed to setup interrupt for rid %d, name %s: %d\n", rid, name, rc); } else if (name) bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name); return (rc); } static int t4_free_irq(struct adapter *sc, struct irq *irq) { if (irq->tag) bus_teardown_intr(sc->dev, irq->res, irq->tag); if (irq->res) bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); bzero(irq, sizeof(*irq)); return (0); } static void get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) { regs->version = chip_id(sc) | chip_rev(sc) << 10; t4_get_regs(sc, buf, regs->len); } #define A_PL_INDIR_CMD 0x1f8 #define S_PL_AUTOINC 31 #define M_PL_AUTOINC 0x1U #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) #define S_PL_VFID 20 #define M_PL_VFID 0xffU #define V_PL_VFID(x) ((x) << S_PL_VFID) #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) #define S_PL_ADDR 0 #define M_PL_ADDR 0xfffffU #define V_PL_ADDR(x) ((x) << S_PL_ADDR) #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) #define A_PL_INDIR_DATA 0x1fc static uint64_t read_vf_stat(struct adapter *sc, u_int vin, int reg) { u32 stats[2]; if (sc->flags & IS_VF) { stats[0] = t4_read_reg(sc, VF_MPS_REG(reg)); stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4)); } else { mtx_assert(&sc->reg_lock, MA_OWNED); t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg))); stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); } return (((uint64_t)stats[1]) << 32 | stats[0]); } static void t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats) { #define GET_STAT(name) \ read_vf_stat(sc, vin, A_MPS_VF_STAT_##name##_L) if (!(sc->flags & IS_VF)) mtx_lock(&sc->reg_lock); stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); stats->tx_offload_frames = 
GET_STAT(TX_VF_OFFLOAD_FRAMES); stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); if (!(sc->flags & IS_VF)) mtx_unlock(&sc->reg_lock); #undef GET_STAT } static void t4_clr_vi_stats(struct adapter *sc, u_int vin) { int reg; t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) t4_write_reg(sc, A_PL_INDIR_DATA, 0); } static void vi_refresh_stats(struct vi_info *vi) { struct timeval tv; const struct timeval interval = {0, 250000}; /* 250ms */ mtx_assert(&vi->tick_mtx, MA_OWNED); if (!(vi->flags & VI_INIT_DONE) || vi->flags & VI_SKIP_STATS) return; getmicrotime(&tv); timevalsub(&tv, &interval); if (timevalcmp(&tv, &vi->last_refreshed, <)) return; t4_get_vi_stats(vi->adapter, vi->vin, &vi->stats); getmicrotime(&vi->last_refreshed); } static void cxgbe_refresh_stats(struct vi_info *vi) { u_int i, v, tnl_cong_drops, chan_map; struct timeval tv; const struct timeval interval = {0, 250000}; /* 250ms */ struct port_info *pi; struct adapter *sc; mtx_assert(&vi->tick_mtx, MA_OWNED); if (vi->flags & VI_SKIP_STATS) return; getmicrotime(&tv); timevalsub(&tv, &interval); if (timevalcmp(&tv, &vi->last_refreshed, <)) return; pi = vi->pi; sc = vi->adapter; tnl_cong_drops = 0; t4_get_port_stats(sc, pi->tx_chan, &pi->stats); chan_map = pi->rx_e_chan_map; while (chan_map) { i = ffs(chan_map) - 1; mtx_lock(&sc->reg_lock); t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1, A_TP_MIB_TNL_CNG_DROP_0 + i); mtx_unlock(&sc->reg_lock); tnl_cong_drops += v; chan_map &= ~(1 << i); } pi->tnl_cong_drops = tnl_cong_drops; getmicrotime(&vi->last_refreshed); } static void cxgbe_tick(void *arg) { struct vi_info *vi = arg; MPASS(IS_MAIN_VI(vi)); mtx_assert(&vi->tick_mtx, MA_OWNED); cxgbe_refresh_stats(vi); callout_schedule(&vi->tick, hz); } static void vi_tick(void *arg) { struct vi_info *vi = arg; mtx_assert(&vi->tick_mtx, MA_OWNED); vi_refresh_stats(vi); callout_schedule(&vi->tick, hz); } /* * Should match fw_caps_config_ enums in t4fw_interface.h */ static char *caps_decoder[] = { "\20\001IPMI\002NCSI", /* 0: NBM */ "\20\001PPP\002QFC\003DCBX", /* 1: link */ "\20\001INGRESS\002EGRESS", /* 2: switch */ "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ "\006HASHFILTER\007ETHOFLD", "\20\001TOE", /* 4: TOE */ "\20\001RDDP\002RDMAC", /* 5: RDMA */ "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" "\007T10DIF" "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", "\20\001LOOKASIDE\002TLSKEYS\003IPSEC_INLINE" /* 7: Crypto */ "\004TLS_HW", "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ "\004PO_INITIATOR\005PO_TARGET", }; void t4_sysctls(struct adapter *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children, *c0; static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; ctx = device_get_sysctl_ctx(sc->dev); /* * dev.t4nex.X. 
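* (For a hypothetical nexus unit 0 these surface as, e.g., * dev.t4nex.0.core_clock and dev.t4nex.0.lro_timeout.)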
*/ oid = device_get_sysctl_tree(sc->dev); c0 = children = SYSCTL_CHILDREN(oid); sc->sc_do_rxcopy = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, sc->params.nports, "# of ports"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, doorbells, (uintptr_t)&sc->doorbells, sysctl_bitfield_8b, "A", "available doorbells"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, sc->params.vpd.cclk, "core clock frequency (in KHz)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc->params.sge.timer_val, sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", "interrupt holdoff timer values (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc->params.sge.counter_val, sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", "interrupt holdoff packet counter values"); t4_sge_sysctls(sc, ctx, children); sc->lro_timeout = 100; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW, &sc->debug_flags, 0, "flags to enable runtime debugging"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version, 0, "firmware version"); if (sc->flags & IS_VF) return; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, NULL, chip_rev(sc), "chip hardware revision"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn", CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn", CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec", CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version", CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na", CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD, sc->er_version, 0, "expansion ROM version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD, sc->bs_version, 0, "bootstrap firmware version"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD, NULL, sc->params.scfg_vers, "serial config version"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD, NULL, sc->params.vpd_vers, "VPD version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, sc->cfcsum, "config file checksum"); #define SYSCTL_CAP(name, n, text) \ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, caps_decoder[n], \ (uintptr_t)&sc->name, sysctl_bitfield_16b, "A", \ "available " text " capabilities") SYSCTL_CAP(nbmcaps, 0, "NBM"); SYSCTL_CAP(linkcaps, 1, "link"); SYSCTL_CAP(switchcaps, 2, "switch"); SYSCTL_CAP(niccaps, 3, "NIC"); SYSCTL_CAP(toecaps, 4, "TCP offload"); SYSCTL_CAP(rdmacaps, 5, "RDMA"); SYSCTL_CAP(iscsicaps, 6, "iSCSI"); SYSCTL_CAP(cryptocaps, 7, "crypto"); SYSCTL_CAP(fcoecaps, 8, "FCoE"); #undef SYSCTL_CAP 
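/*
 * Usage sketch (the unit number 0 is hypothetical): the capability words
 * registered above decode through the %b-style strings in caps_decoder[],
 * so a userland read looks roughly like:
 *
 *	# sysctl dev.t4nex.0.toecaps
 *	dev.t4nex.0.toecaps: 1<TOE>
 *
 * The exact rendering comes from sbuf_printf(sb, "%b", ...) in
 * sysctl_bitfield_16b() further down in this file.
 */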
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, NULL, sc->tids.nftids, "number of filters"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_temperature, "I", "chip temperature (in Celsius)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset_sensor", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_reset_sensor, "I", "reset the chip's temperature sensor."); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_loadavg, "A", "microprocessor load averages (debug firmwares only)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_vdd, "I", "core Vdd (in mV)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, LOCAL_CPUS, sysctl_cpus, "A", "local CPUs"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, INTR_CPUS, sysctl_cpus, "A", "preferred CPUs for interrupts"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "swintr", CTLFLAG_RW, &sc->swintr, 0, "software triggered interrupts"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_reset, "I", "1 = reset adapter, 0 = zero reset counter"); /* * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. */ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, "logs and miscellaneous information"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cctrl, "A", "congestion control"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1, sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2, sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3, sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4, sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5, sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_la, "A", "CIM logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); if (chip_id(sc) > CHELSIO_T4) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); } SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cim_qcfg, "A", "CIM queue configuration"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_cpl_stats, "A", "CPL statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_ddp_stats, "A", "non-TCP DDP statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tid_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tid_stats, "A", "tid stats"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_devlog, "A", "firmware's device log"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_fcoe_stats, "A", "FCoE statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_hw_sched, "A", "hardware scheduler "); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_l2t, "A", "hardware L2 table"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_smt, "A", "hardware source MAC table"); #ifdef INET6 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "clip", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_clip, "A", "active CLIP table entries"); #endif SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_lb_stats, "A", "loopback statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_meminfo, "A", "memory regions"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, chip_id(sc) <= CHELSIO_T5 ? 
sysctl_mps_tcam : sysctl_mps_tcam_t6, "A", "MPS TCAM entries"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_path_mtus, "A", "path MTUs"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_pm_stats, "A", "PM statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_rdma_stats, "A", "RDMA statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tcp_stats, "A", "TCP statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tids, "A", "TID information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_err_stats, "A", "TP error statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tnl_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tnl_stats, "A", "TP tunnel statistics"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_la_mask, "I", "TP logic analyzer event capture mask"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_la, "A", "TP logic analyzer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tx_rate, "A", "Tx rate"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_ulprx_la, "A", "ULPRX logic analyzer"); if (chip_id(sc) >= CHELSIO_T5) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_wcwr_stats, "A", "write combined work requests"); } #ifdef KERN_TLS if (is_ktls(sc)) { /* * dev.t4nex.0.tls. */ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "tls", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "KERN_TLS parameters"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "inline_keys", CTLFLAG_RW, &sc->tlst.inline_keys, 0, "Always pass TLS " "keys in work requests (1) or attempt to store TLS keys " "in card memory."); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "combo_wrs", CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to combine " "TCB field updates with TLS record work requests."); } #endif #ifdef TCP_OFFLOAD if (is_offload(sc)) { int i; char s[4]; /* * dev.t4nex.X.toe. 
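* (e.g., dev.t4nex.0.toe.ddp or dev.t4nex.0.toe.rx_coalesce for a * hypothetical unit 0; these knobs affect offloaded connections only.)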
*/ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE parameters"); children = SYSCTL_CHILDREN(oid); sc->tt.cong_algorithm = -1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm", CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control " "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, " "3 = highspeed)"); sc->tt.sndbuf = -1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, &sc->tt.sndbuf, 0, "hardware send buffer"); sc->tt.ddp = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW | CTLFLAG_SKIP, &sc->tt.ddp, 0, ""); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_zcopy", CTLFLAG_RW, &sc->tt.ddp, 0, "Enable zero-copy aio_read(2)"); sc->tt.rx_coalesce = -1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); sc->tt.tls = 0; SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tls, "I", "Inline TLS allowed"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_ports", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tls_rx_ports, "I", "TCP ports that use inline TLS+TOE RX"); sc->tt.tls_rx_timeout = t4_toe_tls_rx_timeout; SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_timeout", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tls_rx_timeout, "I", "Timeout in seconds to downgrade TLS sockets to plain TOE"); sc->tt.tx_align = -1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); sc->tt.tx_zcopy = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy", CTLFLAG_RW, &sc->tt.tx_zcopy, 0, "Enable zero-copy aio_write(2)"); sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cop_managed_offloading", CTLFLAG_RW, &sc->tt.cop_managed_offloading, 0, "COP (Connection Offload Policy) controls all TOE offload"); sc->tt.autorcvbuf_inc = 16 * 1024; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "autorcvbuf_inc", CTLFLAG_RW, &sc->tt.autorcvbuf_inc, 0, "autorcvbuf increment"); sc->tt.update_hc_on_pmtu_change = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "update_hc_on_pmtu_change", CTLFLAG_RW, &sc->tt.update_hc_on_pmtu_change, 0, "Update hostcache entry if the PMTU changes"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_tick, "A", "TP timer tick (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1, sysctl_tp_tick, "A", "TCP timestamp tick (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2, sysctl_tp_tick, "A", "DACK tick (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_tp_dack_timer, "IU", "DACK timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_RXT_MIN, sysctl_tp_timer, "LU", "Minimum retransmit interval (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_RXT_MAX, sysctl_tp_timer, "LU", "Maximum retransmit interval (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_PERS_MIN, sysctl_tp_timer, "LU", "Persist timer min (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_PERS_MAX, 
sysctl_tp_timer, "LU", "Persist timer max (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_KEEP_IDLE, sysctl_tp_timer, "LU", "Keepalive idle timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_KEEP_INTVL, sysctl_tp_timer, "LU", "Keepalive interval timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_INIT_SRTT, sysctl_tp_timer, "LU", "Initial SRTT (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer", CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, A_TP_FINWAIT2_TIMER, sysctl_tp_timer, "LU", "FINWAIT2 timer (us)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, S_SYNSHIFTMAX, sysctl_tp_shift_cnt, "IU", "Number of SYN retransmissions before abort"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, S_RXTSHIFTMAXR2, sysctl_tp_shift_cnt, "IU", "Number of retransmissions before abort"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, S_KEEPALIVEMAXR2, sysctl_tp_shift_cnt, "IU", "Number of keepalive probes before abort"); oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE retransmit backoffs"); children = SYSCTL_CHILDREN(oid); for (i = 0; i < 16; i++) { snprintf(s, sizeof(s), "%u", i); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s, CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i, sysctl_tp_backoff, "IU", "TOE retransmit backoff"); } } #endif } void vi_sysctls(struct vi_info *vi) { struct sysctl_ctx_list *ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children; ctx = device_get_sysctl_ctx(vi->dev); /* * dev.v?(cxgbe|cxl).X. 
*/ oid = device_get_sysctl_tree(vi->dev); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, vi->viid, "VI identifier"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, &vi->nrxq, 0, "# of rx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, &vi->ntxq, 0, "# of tx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, &vi->first_rxq, 0, "index of first rx queue"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, &vi->first_txq, 0, "index of first tx queue"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_base", CTLFLAG_RD, NULL, vi->rss_base, "start of RSS indirection table"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL, vi->rss_size, "size of RSS indirection table"); if (IS_MAIN_VI(vi)) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_noflowq, "IU", "Reserve queue 0 for non-flowid packets"); } if (vi->adapter->flags & IS_VF) { MPASS(vi->flags & TX_USES_VM_WR); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_vm_wr", CTLFLAG_RD, NULL, 1, "use VM work requests for transmit"); } else { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_vm_wr", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_tx_vm_wr, "I", "use VM work requests for transmit"); } #ifdef TCP_OFFLOAD if (vi->nofldrxq != 0) { SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, &vi->nofldrxq, 0, "# of rx queues for offloaded TCP connections"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", CTLFLAG_RD, &vi->first_ofld_rxq, 0, "index of first TOE rx queue"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_holdoff_tmr_idx_ofld, "I", "holdoff timer index for TOE queues"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_holdoff_pktc_idx_ofld, "I", "holdoff packet counter index for TOE queues"); } #endif #if defined(TCP_OFFLOAD) || defined(RATELIMIT) if (vi->nofldtxq != 0) { SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, &vi->nofldtxq, 0, "# of tx queues for TOE/ETHOFLD"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", CTLFLAG_RD, &vi->first_ofld_txq, 0, "index of first TOE/ETHOFLD tx queue"); } #endif #ifdef DEV_NETMAP if (vi->nnmrxq != 0) { SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, &vi->nnmrxq, 0, "# of netmap rx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, &vi->nnmtxq, 0, "# of netmap tx queues"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", CTLFLAG_RD, &vi->first_nm_rxq, 0, "index of first netmap rx queue"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", CTLFLAG_RD, &vi->first_nm_txq, 0, "index of first netmap tx queue"); } #endif SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_holdoff_tmr_idx, "I", "holdoff timer index"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_holdoff_pktc_idx, "I", "holdoff packet counter index"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_qsize_rxq, "I", "rx queue size"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, 0, sysctl_qsize_txq, "I", "tx queue size"); } static void
cxgbe_sysctls(struct port_info *pi) { struct sysctl_ctx_list *ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children, *children2; struct adapter *sc = pi->adapter; int i; char name[16]; static char *tc_flags = {"\20\1USER\2SYNC\3ASYNC\4ERR"}; ctx = device_get_sysctl_ctx(pi->dev); /* * dev.cxgbe.X. */ oid = device_get_sysctl_tree(pi->dev); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 0, sysctl_btphy, "I", "PHY temperature (in Celsius)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, 1, sysctl_btphy, "I", "PHY firmware version"); } SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0, sysctl_pause_settings, "A", "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0, sysctl_fec, "A", "FECs to use (bit 0 = RS, 1 = FC, 2 = none, 5 = auto, 6 = module)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "module_fec", CTLTYPE_STRING | CTLFLAG_MPSAFE, pi, 0, sysctl_module_fec, "A", "FEC recommended by the cable/transceiver"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, pi, 0, sysctl_autoneg, "I", "autonegotiation (-1 = not supported)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "pcaps", CTLFLAG_RD, &pi->link_cfg.pcaps, 0, "port capabilities"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "acaps", CTLFLAG_RD, &pi->link_cfg.acaps, 0, "advertised capabilities"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpacaps", CTLFLAG_RD, &pi->link_cfg.lpacaps, 0, "link partner advertised capabilities"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, port_top_speed(pi), "max speed (in Gbps)"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL, pi->mps_bg_map, "MPS buffer group map"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD, NULL, pi->rx_e_chan_map, "TP rx e-channel map"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_c_chan", CTLFLAG_RD, NULL, pi->rx_c_chan, "TP rx c-channel"); if (sc->flags & IS_VF) return; /* * dev.(cxgbe|cxl).X.tc. 
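* One numbered child node is created in the loop below for each hardware * traffic class, 0 through nsched_cls - 1.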
*/ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx scheduler traffic classes (cl_rl)"); children2 = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize", CTLFLAG_RW, &pi->sched_params->pktsize, 0, "pktsize for per-flow cl-rl (0 means up to the driver )"); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize", CTLFLAG_RW, &pi->sched_params->burstsize, 0, "burstsize for per-flow cl-rl (0 means up to the driver)"); for (i = 0; i < sc->chip_params->nsched_cls; i++) { struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i]; snprintf(name, sizeof(name), "%d", i); children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "traffic class")); SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, tc_flags, (uintptr_t)&tc->flags, sysctl_bitfield_8b, "A", "flags"); SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount", CTLFLAG_RD, &tc->refcount, 0, "references to this class"); SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, (pi->port_id << 16) | i, sysctl_tc_params, "A", "traffic class parameters"); } /* * dev.cxgbe.X.stats. */ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "port statistics"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, &pi->tx_parse_error, 0, "# of tx packets with invalid length or # of segments"); #define T4_REGSTAT(name, stat, desc) \ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \ (is_t4(sc) ? PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_##stat##_L) : \ T5_PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_##stat##_L)), \ sysctl_handle_t4_reg64, "QU", desc) /* We get these from port_stats and they may be stale by up to 1s */ #define T4_PORTSTAT(name, desc) \ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ &pi->stats.name, desc) T4_REGSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames"); T4_REGSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames"); T4_REGSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames"); T4_REGSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames"); T4_REGSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames"); T4_REGSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames"); T4_REGSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range"); T4_REGSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range"); T4_REGSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames"); T4_REGSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted"); T4_REGSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted"); T4_REGSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted"); T4_REGSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted"); T4_REGSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted"); T4_REGSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted"); T4_REGSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 
5 frames transmitted"); T4_REGSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted"); T4_REGSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted"); T4_REGSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames"); T4_REGSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames"); T4_REGSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames"); T4_REGSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames"); T4_REGSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames"); T4_REGSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU"); T4_REGSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames"); if (is_t6(sc)) { T4_PORTSTAT(rx_fcs_err, "# of frames received with bad FCS since last link up"); } else { T4_REGSTAT(rx_fcs_err, RX_PORT_CRC_ERROR, "# of frames received with bad FCS"); } T4_REGSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error"); T4_REGSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors"); T4_REGSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received"); T4_REGSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range"); T4_REGSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range"); T4_REGSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received"); T4_REGSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received"); T4_REGSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received"); T4_REGSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received"); T4_REGSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received"); T4_REGSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received"); T4_REGSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received"); T4_REGSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received"); T4_REGSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received"); T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows"); T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows"); T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows"); T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows"); T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets"); T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets"); T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets"); T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets"); #undef T4_REGSTAT #undef T4_PORTSTAT } static int sysctl_int_array(SYSCTL_HANDLER_ARGS) { int rc, *i, space = 0; struct sbuf sb; sbuf_new_for_sysctl(&sb, NULL, 64, req); for (i = arg1; arg2; arg2 -= sizeof(int), i++) { if (space) sbuf_printf(&sb, " "); sbuf_printf(&sb, "%d", *i); space = 1; } rc = sbuf_finish(&sb); sbuf_delete(&sb); return (rc); } static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS) { int rc; struct sbuf *sb; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS) { int rc; struct sbuf *sb; rc 
= sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_btphy(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; int op = arg2; struct adapter *sc = pi->adapter; u_int v; int rc; rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { /* XXX: magic numbers */ rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820, &v); } end_synchronized_op(sc, 0); if (rc) return (rc); if (op == 0) v /= 256; rc = sysctl_handle_int(oidp, &v, 0, req); return (rc); } static int sysctl_noflowq(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; int rc, val; val = vi->rsrv_noflowq; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if ((val >= 1) && (vi->ntxq > 1)) vi->rsrv_noflowq = 1; else vi->rsrv_noflowq = 0; return (rc); } static int sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int rc, val, i; MPASS(!(sc->flags & IS_VF)); val = vi->flags & TX_USES_VM_WR ? 1 : 0; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (val != 0 && val != 1) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4txvm"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else if (vi->ifp->if_drv_flags & IFF_DRV_RUNNING) { /* * We don't want parse_pkt to run with one setting (VF or PF) * and then eth_tx to see a different setting but still use * stale information calculated by parse_pkt. */ rc = EBUSY; } else { struct port_info *pi = vi->pi; struct sge_txq *txq; uint32_t ctrl0; uint8_t npkt = sc->params.max_pkts_per_eth_tx_pkts_wr; if (val) { vi->flags |= TX_USES_VM_WR; vi->ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_VM_TSO; ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | V_TXPKT_INTF(pi->tx_chan)); if (!(sc->flags & IS_VF)) npkt--; } else { vi->flags &= ~TX_USES_VM_WR; vi->ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_TSO; ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); } for_each_txq(vi, i, txq) { txq->cpl_ctrl0 = ctrl0; txq->txp.max_npkt = npkt; } } end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int idx, rc, i; struct sge_rxq *rxq; uint8_t v; idx = vi->tmr_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < 0 || idx >= SGE_NTIMERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4tmr"); if (rc) return (rc); v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); for_each_rxq(vi, i, rxq) { #ifdef atomic_store_rel_8 atomic_store_rel_8(&rxq->iq.intr_params, v); #else rxq->iq.intr_params = v; #endif } vi->tmr_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (0); } static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int idx, rc; idx = vi->pktc_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < -1 || idx >= SGE_NCOUNTERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 
"t4pktc"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->pktc_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int qsize, rc; qsize = vi->qsize_rxq; rc = sysctl_handle_int(oidp, &qsize, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (qsize < 128 || (qsize & 7)) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4rxqs"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->qsize_rxq = qsize; end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int qsize, rc; qsize = vi->qsize_txq; rc = sysctl_handle_int(oidp, &qsize, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (qsize < 128 || qsize > 65536) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4txqs"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->qsize_txq = qsize; end_synchronized_op(sc, LOCK_HELD); return (rc); } static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; if (req->newptr == NULL) { struct sbuf *sb; static char *bits = "\20\1RX\2TX\3AUTO"; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); if (lc->link_ok) { sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) | (lc->requested_fc & PAUSE_AUTONEG), bits); } else { sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG), bits); } rc = sbuf_finish(sb); sbuf_delete(sb); } else { char s[2]; int n; s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)); s[1] = 0; rc = sysctl_handle_string(oidp, s, sizeof(s), req); if (rc != 0) return(rc); if (s[1] != 0) return (EINVAL); if (s[0] < '0' || s[0] > '9') return (EINVAL); /* not a number */ n = s[0] - '0'; if (n & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) return (EINVAL); /* some other bit is set too */ rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4PAUSE"); if (rc) return (rc); if (!hw_off_limits(sc)) { PORT_LOCK(pi); lc->requested_fc = n; fixup_link_config(pi); if (pi->up_vis > 0) rc = apply_link_config(pi); set_current_media(pi); PORT_UNLOCK(pi); } end_synchronized_op(sc, 0); } return (rc); } static int sysctl_fec(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; int8_t old; if (req->newptr == NULL) { struct sbuf *sb; static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2" "\5RSVD3\6auto\7module"; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); /* * Display the requested_fec when the link is down -- the actual * FEC makes sense only when the link is up. 
*/ if (lc->link_ok) { sbuf_printf(sb, "%b", (lc->fec & M_FW_PORT_CAP32_FEC) | (lc->requested_fec & (FEC_AUTO | FEC_MODULE)), bits); } else { sbuf_printf(sb, "%b", lc->requested_fec, bits); } rc = sbuf_finish(sb); sbuf_delete(sb); } else { char s[8]; int n; snprintf(s, sizeof(s), "%d", lc->requested_fec == FEC_AUTO ? -1 : lc->requested_fec & (M_FW_PORT_CAP32_FEC | FEC_MODULE)); rc = sysctl_handle_string(oidp, s, sizeof(s), req); if (rc != 0) return(rc); n = strtol(&s[0], NULL, 0); if (n < 0 || n & FEC_AUTO) n = FEC_AUTO; else if (n & ~(M_FW_PORT_CAP32_FEC | FEC_MODULE)) return (EINVAL);/* some other bit is set too */ rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4fec"); if (rc) return (rc); PORT_LOCK(pi); old = lc->requested_fec; if (n == FEC_AUTO) lc->requested_fec = FEC_AUTO; else if (n == 0 || n == FEC_NONE) lc->requested_fec = FEC_NONE; else { if ((lc->pcaps | V_FW_PORT_CAP32_FEC(n & M_FW_PORT_CAP32_FEC)) != lc->pcaps) { rc = ENOTSUP; goto done; } lc->requested_fec = n & (M_FW_PORT_CAP32_FEC | FEC_MODULE); } if (!hw_off_limits(sc)) { fixup_link_config(pi); if (pi->up_vis > 0) { rc = apply_link_config(pi); if (rc != 0) { lc->requested_fec = old; if (rc == FW_EPROTO) rc = ENOTSUP; } } } done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); } return (rc); } static int sysctl_module_fec(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc; int8_t fec; struct sbuf *sb; static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3"; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (sb == NULL) return (ENOMEM); if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mfec") != 0) { rc = EBUSY; goto done; } if (hw_off_limits(sc)) { rc = ENXIO; goto done; } PORT_LOCK(pi); if (pi->up_vis == 0) { /* * If all the interfaces are administratively down the firmware * does not report transceiver changes. Refresh port info here. * This is the only reason we have a synchronized op in this * function. Just PORT_LOCK would have been enough otherwise. */ t4_update_port_info(pi); } fec = lc->fec_hint; if (pi->mod_type == FW_PORT_MOD_TYPE_NONE || !fec_supported(lc->pcaps)) { sbuf_printf(sb, "n/a"); } else { if (fec == 0) fec = FEC_NONE; sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits); } rc = sbuf_finish(sb); PORT_UNLOCK(pi); done: sbuf_delete(sb); end_synchronized_op(sc, 0); return (rc); } static int sysctl_autoneg(SYSCTL_HANDLER_ARGS) { struct port_info *pi = arg1; struct adapter *sc = pi->adapter; struct link_config *lc = &pi->link_cfg; int rc, val; if (lc->pcaps & FW_PORT_CAP32_ANEG) val = lc->requested_aneg == AUTONEG_DISABLE ? 
0 : 1; else val = -1; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (val == 0) val = AUTONEG_DISABLE; else if (val == 1) val = AUTONEG_ENABLE; else val = AUTONEG_AUTO; rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4aneg"); if (rc) return (rc); PORT_LOCK(pi); if (val == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) { rc = ENOTSUP; goto done; } lc->requested_aneg = val; if (!hw_off_limits(sc)) { fixup_link_config(pi); if (pi->up_vis > 0) rc = apply_link_config(pi); set_current_media(pi); } done: PORT_UNLOCK(pi); end_synchronized_op(sc, 0); return (rc); } static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, reg = arg2; uint64_t val; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; val = t4_read_reg64(sc, reg); } mtx_unlock(&sc->reg_lock); if (rc == 0) rc = sysctl_handle_64(oidp, &val, 0, req); return (rc); } static int sysctl_temperature(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, t; uint32_t param, val; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); } end_synchronized_op(sc, 0); if (rc) return (rc); /* unknown is returned as 0 but we display -1 in that case */ t = val == 0 ? -1 : val; rc = sysctl_handle_int(oidp, &t, 0, req); return (rc); } static int sysctl_vdd(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc; uint32_t param, val; if (sc->params.core_vdd == 0) { rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vdd"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); } end_synchronized_op(sc, 0); if (rc) return (rc); sc->params.core_vdd = val; } return (sysctl_handle_int(oidp, &sc->params.core_vdd, 0, req)); } static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, v; uint32_t param, val; v = sc->sensor_resets; rc = sysctl_handle_int(oidp, &v, 0, req); if (rc != 0 || req->newptr == NULL || v <= 0) return (rc); if (sc->params.fw_vers < FW_VERSION32(1, 24, 7, 0) || chip_id(sc) < CHELSIO_T5) return (ENOTSUP); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4srst"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR)); val = 1; rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); } end_synchronized_op(sc, 0); if (rc == 0) sc->sensor_resets++; return (rc); } static int sysctl_loadavg(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; uint32_t param, val; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD); rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); } end_synchronized_op(sc, 0); if (rc) return (rc); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb ==
NULL) return (ENOMEM); if (val == 0xffffffff) { /* Only debug and custom firmwares report load averages. */ sbuf_printf(sb, "not available"); } else { sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff, (val >> 16) & 0xff); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_cctrl(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; uint16_t incr[NMTUS][NCCTRL_WIN]; static const char *dec_fac[] = { "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", "0.9375" }; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_read_cong_tbl(sc, incr); mtx_unlock(&sc->reg_lock); if (rc) goto done; for (i = 0; i < NCCTRL_WIN; ++i) { sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], incr[5][i], incr[6][i], incr[7][i]); sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", incr[8][i], incr[9][i], incr[10][i], incr[11][i], incr[12][i], incr[13][i], incr[14][i], incr[15][i], sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); } rc = sbuf_finish(sb); done: sbuf_delete(sb); return (rc); } static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ }; static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i, n, qid = arg2; uint32_t *buf, *p; char *qtype; u_int cim_num_obq = sc->chip_params->cim_num_obq; KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, ("%s: bad qid %d\n", __func__, qid)); if (qid < CIM_NUM_IBQ) { /* inbound queue */ qtype = "IBQ"; n = 4 * CIM_IBQ_SIZE; buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = -ENXIO; else rc = t4_read_cim_ibq(sc, qid, buf, n); mtx_unlock(&sc->reg_lock); } else { /* outbound queue */ qtype = "OBQ"; qid -= CIM_NUM_IBQ; n = 4 * cim_num_obq * CIM_OBQ_SIZE; buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = -ENXIO; else rc = t4_read_cim_obq(sc, qid, buf, n); mtx_unlock(&sc->reg_lock); } if (rc < 0) { rc = -rc; goto done; } n = rc * sizeof(uint32_t); /* rc has # of words actually read */ rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) goto done; sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); if (sb == NULL) { rc = ENOMEM; goto done; } sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); for (i = 0, p = buf; i < n; i += 16, p += 4) sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], p[2], p[3]); rc = sbuf_finish(sb); sbuf_delete(sb); done: free(buf, M_CXGBE); return (rc); } static void sbuf_cim_la4(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg) { uint32_t *p; sbuf_printf(sb, "Status Data PC%s", cfg & F_UPDBGLACAPTPCONLY ? 
"" : " LS0Stat LS0Addr LS0Data"); for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { if (cfg & F_UPDBGLACAPTPCONLY) { sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, p[6], p[7]); sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, p[4] & 0xff, p[5] >> 8); sbuf_printf(sb, "\n %02x %x%07x %x%07x", (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4); } else { sbuf_printf(sb, "\n %02x %x%07x %x%07x %08x %08x " "%08x%08x%08x%08x", (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], p[6], p[7]); } } } static void sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg) { uint32_t *p; sbuf_printf(sb, "Status Inst Data PC%s", cfg & F_UPDBGLACAPTPCONLY ? "" : " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { if (cfg & F_UPDBGLACAPTPCONLY) { sbuf_printf(sb, "\n %02x %08x %08x %08x", p[3] & 0xff, p[2], p[1], p[0]); sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, p[6] >> 16); } else { sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " "%08x %08x %08x %08x %08x %08x", (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, p[6] >> 16, p[2], p[1], p[0], p[5], p[4], p[3]); } } } static int sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags) { uint32_t cfg, *buf; int rc; MPASS(flags == M_WAITOK || flags == M_NOWAIT); buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, M_ZERO | flags); if (buf == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); if (rc == 0) rc = -t4_cim_read_la(sc, buf, NULL); } mtx_unlock(&sc->reg_lock); if (rc == 0) { if (chip_id(sc) < CHELSIO_T6) sbuf_cim_la4(sc, sb, buf, cfg); else sbuf_cim_la6(sc, sb, buf, cfg); } free(buf, M_CXGBE); return (rc); } static int sysctl_cim_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); rc = sbuf_cim_la(sc, sb, M_WAITOK); if (rc == 0) rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } bool t4_os_dump_cimla(struct adapter *sc, int arg, bool verbose) { struct sbuf sb; int rc; if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) return (false); rc = sbuf_cim_la(sc, &sb, M_NOWAIT); if (rc == 0) { rc = sbuf_finish(&sb); if (rc == 0) { log(LOG_DEBUG, "%s: CIM LA dump follows.\n%s", device_get_nameunit(sc->dev), sbuf_data(&sb)); } } sbuf_delete(&sb); return (false); } static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; u_int i; struct sbuf *sb; uint32_t *buf, *p; int rc; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); mtx_unlock(&sc->reg_lock); if (rc) goto done; p = buf; for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], p[1], 
p[0]); } sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, (p[1] >> 2) | ((p[2] & 3) << 30), (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, p[0] & 1); } rc = sbuf_finish(sb); done: sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; u_int i; struct sbuf *sb; uint32_t *buf, *p; int rc; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); mtx_unlock(&sc->reg_lock); if (rc) goto done; p = buf; sbuf_printf(sb, "Cntl ID DataBE Addr Data"); for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, p[4], p[3], p[2], p[1], p[0]); } sbuf_printf(sb, "\n\nCntl ID Data"); for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); } rc = sbuf_finish(sb); done: sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; uint16_t thres[CIM_NUM_IBQ]; uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; cim_num_obq = sc->chip_params->cim_num_obq; if (is_t4(sc)) { ibq_rdaddr = A_UP_IBQ_0_RDADDR; obq_rdaddr = A_UP_OBQ_0_REALADDR; } else { ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; } nq = CIM_NUM_IBQ + cim_num_obq; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); if (rc == 0) { rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); if (rc == 0) t4_read_cimq_cfg(sc, base, size, thres); } } mtx_unlock(&sc->reg_lock); if (rc) return (rc); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), G_QUEREMFLITS(p[2]) * 16); for ( ; i < nq; i++, p += 4, wr += 2) sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), G_QUEREMFLITS(p[2]) * 16); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_cpl_stats stats; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_cpl_stats(sc, &stats, 0); mtx_unlock(&sc->reg_lock); if (rc) 
goto done; if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3"); sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", stats.req[0], stats.req[1], stats.req[2], stats.req[3]); sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); } else { sbuf_printf(sb, " channel 0 channel 1"); sbuf_printf(sb, "\nCPL requests: %10u %10u", stats.req[0], stats.req[1]); sbuf_printf(sb, "\nCPL responses: %10u %10u", stats.rsp[0], stats.rsp[1]); } rc = sbuf_finish(sb); done: sbuf_delete(sb); return (rc); } static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_usm_stats stats; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_get_usm_stats(sc, &stats, 1); mtx_unlock(&sc->reg_lock); if (rc == 0) { sbuf_printf(sb, "Frames: %u\n", stats.frames); sbuf_printf(sb, "Octets: %ju\n", stats.octets); sbuf_printf(sb, "Drops: %u", stats.drops); rc = sbuf_finish(sb); } sbuf_delete(sb); return (rc); } static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_tid_stats stats; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_tid_stats(sc, &stats, 1); mtx_unlock(&sc->reg_lock); if (rc == 0) { sbuf_printf(sb, "Delete: %u\n", stats.del); sbuf_printf(sb, "Invalidate: %u\n", stats.inv); sbuf_printf(sb, "Active: %u\n", stats.act); sbuf_printf(sb, "Passive: %u", stats.pas); rc = sbuf_finish(sb); } sbuf_delete(sb); return (rc); } static const char * const devlog_level_strings[] = { [FW_DEVLOG_LEVEL_EMERG] = "EMERG", [FW_DEVLOG_LEVEL_CRIT] = "CRIT", [FW_DEVLOG_LEVEL_ERR] = "ERR", [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", [FW_DEVLOG_LEVEL_INFO] = "INFO", [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" }; static const char * const devlog_facility_strings[] = { [FW_DEVLOG_FACILITY_CORE] = "CORE", [FW_DEVLOG_FACILITY_CF] = "CF", [FW_DEVLOG_FACILITY_SCHED] = "SCHED", [FW_DEVLOG_FACILITY_TIMER] = "TIMER", [FW_DEVLOG_FACILITY_RES] = "RES", [FW_DEVLOG_FACILITY_HW] = "HW", [FW_DEVLOG_FACILITY_FLR] = "FLR", [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", [FW_DEVLOG_FACILITY_PHY] = "PHY", [FW_DEVLOG_FACILITY_MAC] = "MAC", [FW_DEVLOG_FACILITY_PORT] = "PORT", [FW_DEVLOG_FACILITY_VI] = "VI", [FW_DEVLOG_FACILITY_FILTER] = "FILTER", [FW_DEVLOG_FACILITY_ACL] = "ACL", [FW_DEVLOG_FACILITY_TM] = "TM", [FW_DEVLOG_FACILITY_QFC] = "QFC", [FW_DEVLOG_FACILITY_DCB] = "DCB", [FW_DEVLOG_FACILITY_ETH] = "ETH", [FW_DEVLOG_FACILITY_OFLD] = "OFLD", [FW_DEVLOG_FACILITY_RI] = "RI", [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", [FW_DEVLOG_FACILITY_FCOE] = "FCOE", [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", [FW_DEVLOG_FACILITY_CHNET] = "CHNET", }; static int sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags) { int i, j, rc, nentries, first = 0; struct devlog_params *dparams = &sc->params.devlog; struct fw_devlog_e *buf, *e; uint64_t ftstamp = UINT64_MAX; if (dparams->addr == 0) return (ENXIO); MPASS(flags == M_WAITOK || flags == M_NOWAIT); buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags); if (buf == NULL) return (ENOMEM); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else rc = read_via_memwin(sc, 1, 
dparams->addr, (void *)buf, dparams->size); mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; nentries = dparams->size / sizeof(struct fw_devlog_e); for (i = 0; i < nentries; i++) { e = &buf[i]; if (e->timestamp == 0) break; /* end */ e->timestamp = be64toh(e->timestamp); e->seqno = be32toh(e->seqno); for (j = 0; j < 8; j++) e->params[j] = be32toh(e->params[j]); if (e->timestamp < ftstamp) { ftstamp = e->timestamp; first = i; } } if (buf[first].timestamp == 0) goto done; /* nothing in the log */ sbuf_printf(sb, "%10s %15s %8s %8s %s\n", "Seq#", "Tstamp", "Level", "Facility", "Message"); i = first; do { e = &buf[i]; if (e->timestamp == 0) break; /* end */ sbuf_printf(sb, "%10d %15ju %8s %8s ", e->seqno, e->timestamp, (e->level < nitems(devlog_level_strings) ? devlog_level_strings[e->level] : "UNKNOWN"), (e->facility < nitems(devlog_facility_strings) ? devlog_facility_strings[e->facility] : "UNKNOWN")); sbuf_printf(sb, e->fmt, e->params[0], e->params[1], e->params[2], e->params[3], e->params[4], e->params[5], e->params[6], e->params[7]); if (++i == nentries) i = 0; } while (i != first); done: free(buf, M_CXGBE); return (rc); } static int sysctl_devlog(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc; struct sbuf *sb; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); rc = sbuf_devlog(sc, sb, M_WAITOK); if (rc == 0) rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } void t4_os_dump_devlog(struct adapter *sc) { int rc; struct sbuf sb; if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) return; rc = sbuf_devlog(sc, &sb, M_NOWAIT); if (rc == 0) { rc = sbuf_finish(&sb); if (rc == 0) { log(LOG_DEBUG, "%s: device log follows.\n%s", device_get_nameunit(sc->dev), sbuf_data(&sb)); } } sbuf_delete(&sb); } static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_fcoe_stats stats[MAX_NCHAN]; int i, nchan = sc->chip_params->nchan; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { for (i = 0; i < nchan; i++) t4_get_fcoe_stats(sc, i, &stats[i], 1); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3"); sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", stats[0].octets_ddp, stats[1].octets_ddp, stats[2].octets_ddp, stats[3].octets_ddp); sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", stats[0].frames_ddp, stats[1].frames_ddp, stats[2].frames_ddp, stats[3].frames_ddp); sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", stats[0].frames_drop, stats[1].frames_drop, stats[2].frames_drop, stats[3].frames_drop); } else { sbuf_printf(sb, " channel 0 channel 1"); sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", stats[0].octets_ddp, stats[1].octets_ddp); sbuf_printf(sb, "\nframesDDP: %16u %16u", stats[0].frames_ddp, stats[1].frames_ddp); sbuf_printf(sb, "\nframesDrop: %16u %16u", stats[0].frames_drop, stats[1].frames_drop); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; unsigned int map, kbps, ipg, mode; unsigned int pace_tab[NTX_SCHED]; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 512, req); if (sb == NULL) return (ENOMEM); 
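/*
 * The register reads below follow the pattern used by nearly every
 * handler in this file: take reg_lock, bail out with ENXIO if the
 * hardware is off limits (suspended, resetting, or detached), and only
 * then touch the chip.  A minimal sketch of the idiom, with 'reg'
 * standing in for any register offset and rc assumed 0 on entry:
 *
 *	mtx_lock(&sc->reg_lock);
 *	if (hw_off_limits(sc))
 *		rc = ENXIO;
 *	else
 *		val = t4_read_reg(sc, reg);	(safe to read registers here)
 *	mtx_unlock(&sc->reg_lock);
 *	if (rc != 0)
 *		return (rc);
 */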
mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); t4_read_pace_tbl(sc, pace_tab); sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " "Class IPG (0.1 ns) Flow IPG (us)"); for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { t4_get_tx_sched(sc, i, &kbps, &ipg, 1); sbuf_printf(sb, "\n %u %-5s %u ", i, (mode & (1 << i)) ? "flow" : "class", map & 3); if (kbps) sbuf_printf(sb, "%9u ", kbps); else sbuf_printf(sb, " disabled "); if (ipg) sbuf_printf(sb, "%13u ", ipg); else sbuf_printf(sb, " disabled "); if (pace_tab[i]) sbuf_printf(sb, "%10u", pace_tab[i]); else sbuf_printf(sb, " disabled"); } rc = sbuf_finish(sb); done: mtx_unlock(&sc->reg_lock); sbuf_delete(sb); return (rc); } static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i, j; uint64_t *p0, *p1; struct lb_port_stats s[2]; static const char *stat_name[] = { "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", "Frames128To255:", "Frames256To511:", "Frames512To1023:", "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", "BG2FramesTrunc:", "BG3FramesTrunc:" }; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); memset(s, 0, sizeof(s)); for (i = 0; i < sc->chip_params->nchan; i += 2) { mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_get_lb_stats(sc, i, &s[0]); t4_get_lb_stats(sc, i + 1, &s[1]); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; p0 = &s[0].octets; p1 = &s[1].octets; sbuf_printf(sb, "%s Loopback %u" " Loopback %u", i == 0 ? 
"" : "\n", i, i + 1); for (j = 0; j < nitems(stat_name); j++) sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], *p0++, *p1++); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) { int rc = 0; struct port_info *pi = arg1; struct link_config *lc = &pi->link_cfg; struct sbuf *sb; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); if (sb == NULL) return (ENOMEM); if (lc->link_ok || lc->link_down_rc == 255) sbuf_printf(sb, "n/a"); else sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc)); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } struct mem_desc { unsigned int base; unsigned int limit; unsigned int idx; }; static int mem_desc_cmp(const void *a, const void *b) { return ((const struct mem_desc *)a)->base - ((const struct mem_desc *)b)->base; } static void mem_region_show(struct sbuf *sb, const char *name, unsigned int from, unsigned int to) { unsigned int size; if (from == to) return; size = to - from + 1; if (size == 0) return; /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); } static int sysctl_meminfo(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i, n; uint32_t lo, hi, used, alloc; static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; static const char *region[] = { "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", "TDDP region:", "TPT region:", "STAG region:", "RQ region:", "RQUDP region:", "PBL region:", "TXPBL region:", "DBVFIFO region:", "ULPRX state:", "ULPTX state:", "On-chip queues:", "TLS keys:", }; struct mem_desc avail[4]; struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ struct mem_desc *md = mem; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); for (i = 0; i < nitems(mem); i++) { mem[i].limit = 0; mem[i].idx = i; } mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } /* Find and sort the populated memory ranges */ i = 0; lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); if (lo & F_EDRAM0_ENABLE) { hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); avail[i].base = G_EDRAM0_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); avail[i].idx = 0; i++; } if (lo & F_EDRAM1_ENABLE) { hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); avail[i].base = G_EDRAM1_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); avail[i].idx = 1; i++; } if (lo & F_EXT_MEM_ENABLE) { hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); avail[i].base = G_EXT_MEM_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20); avail[i].idx = is_t5(sc) ? 
3 : 2; /* Call it MC0 for T5 */ i++; } if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); avail[i].base = G_EXT_MEM1_BASE(hi) << 20; avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20); avail[i].idx = 4; i++; } if (!i) /* no memory available */ goto done; qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); /* the next few have explicit upper bounds */ md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); md->limit = md->base - 1 + t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); md++; md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); md->limit = md->base - 1 + t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); md++; if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { if (chip_id(sc) <= CHELSIO_T5) md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); else md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); md->limit = 0; } else { md->base = 0; md->idx = nitems(region); /* hide it */ } md++; #define ulp_region(reg) \ md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) ulp_region(RX_ISCSI); ulp_region(RX_TDDP); ulp_region(TX_TPT); ulp_region(RX_STAG); ulp_region(RX_RQ); ulp_region(RX_RQUDP); ulp_region(RX_PBL); ulp_region(TX_PBL); #undef ulp_region md->base = 0; md->idx = nitems(region); if (!is_t4(sc)) { uint32_t size = 0; uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); if (is_t5(sc)) { if (sge_ctrl & F_VFIFO_ENABLE) size = G_DBVFIFO_SIZE(fifo_size); } else size = G_T6_DBVFIFO_SIZE(fifo_size); if (size) { md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR)); md->limit = md->base + (size << 2) - 1; } } md++; md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); md->limit = 0; md++; md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); md->limit = 0; md++; md->base = sc->vres.ocq.start; if (sc->vres.ocq.size) md->limit = md->base + sc->vres.ocq.size - 1; else md->idx = nitems(region); /* hide it */ md++; md->base = sc->vres.key.start; if (sc->vres.key.size) md->limit = md->base + sc->vres.key.size - 1; else md->idx = nitems(region); /* hide it */ md++; /* add any address-space holes, there can be up to 3 */ for (n = 0; n < i - 1; n++) if (avail[n].limit < avail[n + 1].base) (md++)->base = avail[n].limit; if (avail[n].limit) (md++)->base = avail[n].limit; n = md - mem; qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); for (lo = 0; lo < i; lo++) mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, avail[lo].limit - 1); sbuf_printf(sb, "\n"); for (i = 0; i < n; i++) { if (mem[i].idx >= nitems(region)) continue; /* skip holes */ if (!mem[i].limit) mem[i].limit = i < n - 1 ? 
mem[i + 1].base - 1 : ~0; mem_region_show(sb, region[mem[i].idx], mem[i].base, mem[i].limit); } sbuf_printf(sb, "\n"); lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; mem_region_show(sb, "uP RAM:", lo, hi); lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; mem_region_show(sb, "uP Extmem2:", lo, hi); lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", G_PMRXMAXPAGE(lo), t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, (lo & F_PMRXNUMCHN) ? 2 : 1); lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", G_PMTXMAXPAGE(lo), hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo)); sbuf_printf(sb, "%u p-structs\n", t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); for (i = 0; i < 4; i++) { if (chip_id(sc) > CHELSIO_T5) lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); else lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); if (is_t5(sc)) { used = G_T5_USED(lo); alloc = G_T5_ALLOC(lo); } else { used = G_USED(lo); alloc = G_ALLOC(lo); } /* For T6 these are MAC buffer groups */ sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", i, used, alloc); } for (i = 0; i < sc->chip_params->nchan; i++) { if (chip_id(sc) > CHELSIO_T5) lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); else lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); if (is_t5(sc)) { used = G_T5_USED(lo); alloc = G_T5_ALLOC(lo); } else { used = G_USED(lo); alloc = G_ALLOC(lo); } /* For T6 these are MAC buffer groups */ sbuf_printf(sb, "\nLoopback %d using %u pages out of %u allocated", i, used, alloc); } done: mtx_unlock(&sc->reg_lock); if (rc == 0) rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static inline void tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) { *mask = x | y; y = htobe64(y); memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); } static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; MPASS(chip_id(sc) <= CHELSIO_T5); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "Idx Ethernet address Mask Vld Ports PF" " VF Replication P0 P1 P2 P3 ML"); for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { uint64_t tcamx, tcamy, mask; uint32_t cls_lo, cls_hi; uint8_t addr[ETHER_ADDR_LEN]; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; if (tcamx & tcamy) continue; tcamxy2valmask(tcamx, tcamy, addr, &mask); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" " %c %#x%4u%4d", i, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (uintmax_t)mask, (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', G_PORTMAP(cls_hi), G_PF(cls_lo), (cls_lo & F_VF_VALID) ? 
G_VF(cls_lo) : -1); if (cls_lo & F_REPLICATE) { struct fw_ldst_cmd ldst_cmd; memset(&ldst_cmd, 0, sizeof(ldst_cmd)); ldst_cmd.op_to_addrspace = htobe32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); ldst_cmd.u.mps.rplc.fid_idx = htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | V_FW_LDST_CMD_IDX(i)); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mps"); if (rc) break; if (hw_off_limits(sc)) rc = ENXIO; else rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); end_synchronized_op(sc, 0); if (rc != 0) break; else { sbuf_printf(sb, " %08x %08x %08x %08x", be32toh(ldst_cmd.u.mps.rplc.rplc127_96), be32toh(ldst_cmd.u.mps.rplc.rplc95_64), be32toh(ldst_cmd.u.mps.rplc.rplc63_32), be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); } } else sbuf_printf(sb, "%36s", ""); sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); } if (rc) (void) sbuf_finish(sb); else rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; MPASS(chip_id(sc) > CHELSIO_T5); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" " Replication" " P0 P1 P2 P3 ML\n"); for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { uint8_t dip_hit, vlan_vld, lookup_type, port_num; uint16_t ivlan; uint64_t tcamx, tcamy, val, mask; uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; uint8_t addr[ETHER_ADDR_LEN]; ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); if (i < 256) ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); else ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); tcamy = G_DMACH(val) << 32; tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; lookup_type = G_DATALKPTYPE(data2); port_num = G_DATAPORTNUM(data2); if (lookup_type && lookup_type != M_DATALKPTYPE) { /* Inner header VNI */ vniy = ((data2 & F_DATAVIDH2) << 23) | (G_DATAVIDH1(data2) << 16) | G_VIDL(val); dip_hit = data2 & F_DATADIPHIT; vlan_vld = 0; } else { vniy = 0; dip_hit = 0; vlan_vld = data2 & F_DATAVIDH2; ivlan = G_VIDL(val); } ctl |= V_CTLXYBITSEL(1); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); tcamx = G_DMACH(val) << 32; tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; if (lookup_type && lookup_type != M_DATALKPTYPE) { /* Inner header VNI mask */ vnix = ((data2 & F_DATAVIDH2) << 23) | (G_DATAVIDH1(data2) << 16) | G_VIDL(val); } else vnix = 0; if (tcamx & tcamy) continue; tcamxy2valmask(tcamx, tcamy, addr, &mask); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); } mtx_unlock(&sc->reg_lock); if (rc != 0) break; if 
(lookup_type && lookup_type != M_DATALKPTYPE) { sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " "%012jx %06x %06x - - %3c" " I %4x %3c %#x%4u%4d", i, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', G_PORTMAP(cls_hi), G_T6_PF(cls_lo), cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); } else { sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " "%012jx - - ", i, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], (uintmax_t)mask); if (vlan_vld) sbuf_printf(sb, "%4u Y ", ivlan); else sbuf_printf(sb, " - N "); sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", lookup_type ? 'I' : 'O', port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', G_PORTMAP(cls_hi), G_T6_PF(cls_lo), cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); } if (cls_lo & F_T6_REPLICATE) { struct fw_ldst_cmd ldst_cmd; memset(&ldst_cmd, 0, sizeof(ldst_cmd)); ldst_cmd.op_to_addrspace = htobe32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); ldst_cmd.u.mps.rplc.fid_idx = htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | V_FW_LDST_CMD_IDX(i)); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t6mps"); if (rc) break; if (hw_off_limits(sc)) rc = ENXIO; else rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); end_synchronized_op(sc, 0); if (rc != 0) break; else { sbuf_printf(sb, " %08x %08x %08x %08x" " %08x %08x %08x %08x", be32toh(ldst_cmd.u.mps.rplc.rplc255_224), be32toh(ldst_cmd.u.mps.rplc.rplc223_192), be32toh(ldst_cmd.u.mps.rplc.rplc191_160), be32toh(ldst_cmd.u.mps.rplc.rplc159_128), be32toh(ldst_cmd.u.mps.rplc.rplc127_96), be32toh(ldst_cmd.u.mps.rplc.rplc95_64), be32toh(ldst_cmd.u.mps.rplc.rplc63_32), be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); } } else sbuf_printf(sb, "%72s", ""); sbuf_printf(sb, "%4u%3u%3u%3u %#x", G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), (cls_lo >> S_T6_MULTILISTEN0) & 0xf); } if (rc) (void) sbuf_finish(sb); else rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; uint16_t mtus[NMTUS]; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_read_mtu_tbl(sc, mtus, NULL); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], mtus[14], mtus[15]); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc, i; uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; static const char *tx_stats[MAX_PM_NSTATS] = { "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", "Tx FIFO wait", NULL, "Tx latency" }; static const char *rx_stats[MAX_PM_NSTATS] = { "Read:", "Write bypass:", "Write mem:", "Flush:", "Rx FIFO wait", NULL, "Rx latency" }; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); t4_pmrx_get_stats(sc, rx_cnt, 
rx_cyc); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, " Tx pcmds Tx bytes"); for (i = 0; i < 4; i++) { sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], tx_cyc[i]); } sbuf_printf(sb, "\n Rx pcmds Rx bytes"); for (i = 0; i < 4; i++) { sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], rx_cyc[i]); } if (chip_id(sc) > CHELSIO_T5) { sbuf_printf(sb, "\n Total wait Total occupancy"); sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], tx_cyc[i]); sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], rx_cyc[i]); i += 2; MPASS(i < nitems(tx_stats)); sbuf_printf(sb, "\n Reads Total wait"); sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], tx_cyc[i]); sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], rx_cyc[i]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_rdma_stats stats; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_rdma_stats(sc, &stats, 0); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod); sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_tcp_stats v4, v6; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_tcp_stats(sc, &v4, &v6, 0); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); sbuf_printf(sb, " IP IPv6\n"); sbuf_printf(sb, "OutRsts: %20u %20u\n", v4.tcp_out_rsts, v6.tcp_out_rsts); sbuf_printf(sb, "InSegs: %20ju %20ju\n", v4.tcp_in_segs, v6.tcp_in_segs); sbuf_printf(sb, "OutSegs: %20ju %20ju\n", v4.tcp_out_segs, v6.tcp_out_segs); sbuf_printf(sb, "RetransSegs: %20ju %20ju", v4.tcp_retrans_segs, v6.tcp_retrans_segs); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tids(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; uint32_t x, y; struct tid_info *t = &sc->tids; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (t->natids) { sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, t->atids_in_use); } if (t->nhpftids) { sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n", t->hpftid_base, t->hpftid_end, t->hpftids_in_use); } if (t->ntids) { bool hashen = false; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { hashen = true; if (chip_id(sc) <= CHELSIO_T5) { x = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; y = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4; } else { x = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX); y = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE); } } mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; sbuf_printf(sb, "TID range: "); if (hashen) { if (x) sbuf_printf(sb, "%u-%u, ", t->tid_base, x - 1); sbuf_printf(sb, "%u-%u", y, t->ntids - 1); } else { sbuf_printf(sb, "%u-%u", t->tid_base, t->tid_base + 
t->ntids - 1); } sbuf_printf(sb, ", in use: %u\n", atomic_load_acq_int(&t->tids_in_use)); } if (t->nstids) { sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, t->stid_base + t->nstids - 1, t->stids_in_use); } if (t->nftids) { sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base, t->ftid_end, t->ftids_in_use); } if (t->netids) { sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base, t->etid_base + t->netids - 1, t->etids_in_use); } mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { x = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4); y = t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6); } mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", x, y); done: if (rc == 0) rc = sbuf_finish(sb); else (void)sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_err_stats stats; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_err_stats(sc, &stats, 0); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3\n"); sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", stats.mac_in_errs[0], stats.mac_in_errs[1], stats.mac_in_errs[2], stats.mac_in_errs[3]); sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", stats.hdr_in_errs[0], stats.hdr_in_errs[1], stats.hdr_in_errs[2], stats.hdr_in_errs[3]); sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", stats.tcp_in_errs[0], stats.tcp_in_errs[1], stats.tcp_in_errs[2], stats.tcp_in_errs[3]); sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]); } else { sbuf_printf(sb, " channel 0 channel 1\n"); sbuf_printf(sb, "macInErrs: %10u %10u\n", stats.mac_in_errs[0], stats.mac_in_errs[1]); sbuf_printf(sb, "hdrInErrs: %10u %10u\n", stats.hdr_in_errs[0], stats.hdr_in_errs[1]); sbuf_printf(sb, "tcpInErrs: %10u %10u\n", stats.tcp_in_errs[0], stats.tcp_in_errs[1]); sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); } sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", stats.ofld_no_neigh, stats.ofld_cong_defer); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int 
sysctl_tnl_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; struct tp_tnl_stats stats; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return(rc); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_tp_get_tnl_stats(sc, &stats, 1); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3\n"); sbuf_printf(sb, "OutPkts: %10u %10u %10u %10u\n", stats.out_pkt[0], stats.out_pkt[1], stats.out_pkt[2], stats.out_pkt[3]); sbuf_printf(sb, "InPkts: %10u %10u %10u %10u", stats.in_pkt[0], stats.in_pkt[1], stats.in_pkt[2], stats.in_pkt[3]); } else { sbuf_printf(sb, " channel 0 channel 1\n"); sbuf_printf(sb, "OutPkts: %10u %10u\n", stats.out_pkt[0], stats.out_pkt[1]); sbuf_printf(sb, "InPkts: %10u %10u", stats.in_pkt[0], stats.in_pkt[1]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct tp_params *tpp = &sc->params.tp; u_int mask; int rc; mask = tpp->la_mask >> 16; rc = sysctl_handle_int(oidp, &mask, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (mask > 0xffff) return (EINVAL); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { tpp->la_mask = mask << 16; t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); } mtx_unlock(&sc->reg_lock); return (rc); } struct field_desc { const char *name; u_int start; u_int width; }; static void field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) { char buf[32]; int line_size = 0; while (f->name) { uint64_t mask = (1ULL << f->width) - 1; int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, ((uintmax_t)v >> f->start) & mask); if (line_size + len >= 79) { line_size = 8; sbuf_printf(sb, "\n "); } sbuf_printf(sb, "%s ", buf); line_size += len + 1; f++; } sbuf_printf(sb, "\n"); } static const struct field_desc tp_la0[] = { { "RcfOpCodeOut", 60, 4 }, { "State", 56, 4 }, { "WcfState", 52, 4 }, { "RcfOpcSrcOut", 50, 2 }, { "CRxError", 49, 1 }, { "ERxError", 48, 1 }, { "SanityFailed", 47, 1 }, { "SpuriousMsg", 46, 1 }, { "FlushInputMsg", 45, 1 }, { "FlushInputCpl", 44, 1 }, { "RssUpBit", 43, 1 }, { "RssFilterHit", 42, 1 }, { "Tid", 32, 10 }, { "InitTcb", 31, 1 }, { "LineNumber", 24, 7 }, { "Emsg", 23, 1 }, { "EdataOut", 22, 1 }, { "Cmsg", 21, 1 }, { "CdataOut", 20, 1 }, { "EreadPdu", 19, 1 }, { "CreadPdu", 18, 1 }, { "TunnelPkt", 17, 1 }, { "RcfPeerFin", 16, 1 }, { "RcfReasonOut", 12, 4 }, { "TxCchannel", 10, 2 }, { "RcfTxChannel", 8, 2 }, { "RxEchannel", 6, 2 }, { "RcfRxChannel", 5, 1 }, { "RcfDataOutSrdy", 4, 1 }, { "RxDvld", 3, 1 }, { "RxOoDvld", 2, 1 }, { "RxCongestion", 1, 1 }, { "TxCongestion", 0, 1 }, { NULL } }; static const struct field_desc tp_la1[] = { { "CplCmdIn", 56, 8 }, { "CplCmdOut", 48, 8 }, { "ESynOut", 47, 1 }, { "EAckOut", 46, 1 }, { "EFinOut", 45, 1 }, { "ERstOut", 44, 1 }, { "SynIn", 43, 1 }, { "AckIn", 42, 1 }, { "FinIn", 41, 1 }, { "RstIn", 40, 1 }, { "DataIn", 39, 1 }, { "DataInVld", 38, 1 }, { "PadIn", 37, 1 }, { "RxBufEmpty", 36, 1 }, { "RxDdp", 35, 1 }, { "RxFbCongestion", 34, 1 }, { "TxFbCongestion", 33, 1 }, { "TxPktSumSrdy", 32, 1 }, { "RcfUlpType", 28, 4 }, { "Eread", 27, 1 }, { "Ebypass", 26, 1 }, { "Esave", 25, 1 }, { "Static0", 24, 1 }, { "Cread", 23, 1 }, { "Cbypass", 22, 1 }, { "Csave", 21, 1 }, { "CPktOut", 20, 1 }, { "RxPagePoolFull", 18, 2 }, { "RxLpbkPkt", 17, 
1 }, { "TxLpbkPkt", 16, 1 }, { "RxVfValid", 15, 1 }, { "SynLearned", 14, 1 }, { "SetDelEntry", 13, 1 }, { "SetInvEntry", 12, 1 }, { "CpcmdDvld", 11, 1 }, { "CpcmdSave", 10, 1 }, { "RxPstructsFull", 8, 2 }, { "EpcmdDvld", 7, 1 }, { "EpcmdFlush", 6, 1 }, { "EpcmdTrimPrefix", 5, 1 }, { "EpcmdTrimPostfix", 4, 1 }, { "ERssIp4Pkt", 3, 1 }, { "ERssIp6Pkt", 2, 1 }, { "ERssTcpUdpPkt", 1, 1 }, { "ERssFceFipPkt", 0, 1 }, { NULL } }; static const struct field_desc tp_la2[] = { { "CplCmdIn", 56, 8 }, { "MpsVfVld", 55, 1 }, { "MpsPf", 52, 3 }, { "MpsVf", 44, 8 }, { "SynIn", 43, 1 }, { "AckIn", 42, 1 }, { "FinIn", 41, 1 }, { "RstIn", 40, 1 }, { "DataIn", 39, 1 }, { "DataInVld", 38, 1 }, { "PadIn", 37, 1 }, { "RxBufEmpty", 36, 1 }, { "RxDdp", 35, 1 }, { "RxFbCongestion", 34, 1 }, { "TxFbCongestion", 33, 1 }, { "TxPktSumSrdy", 32, 1 }, { "RcfUlpType", 28, 4 }, { "Eread", 27, 1 }, { "Ebypass", 26, 1 }, { "Esave", 25, 1 }, { "Static0", 24, 1 }, { "Cread", 23, 1 }, { "Cbypass", 22, 1 }, { "Csave", 21, 1 }, { "CPktOut", 20, 1 }, { "RxPagePoolFull", 18, 2 }, { "RxLpbkPkt", 17, 1 }, { "TxLpbkPkt", 16, 1 }, { "RxVfValid", 15, 1 }, { "SynLearned", 14, 1 }, { "SetDelEntry", 13, 1 }, { "SetInvEntry", 12, 1 }, { "CpcmdDvld", 11, 1 }, { "CpcmdSave", 10, 1 }, { "RxPstructsFull", 8, 2 }, { "EpcmdDvld", 7, 1 }, { "EpcmdFlush", 6, 1 }, { "EpcmdTrimPrefix", 5, 1 }, { "EpcmdTrimPostfix", 4, 1 }, { "ERssIp4Pkt", 3, 1 }, { "ERssIp6Pkt", 2, 1 }, { "ERssTcpUdpPkt", 1, 1 }, { "ERssFceFipPkt", 0, 1 }, { NULL } }; static void tp_la_show(struct sbuf *sb, uint64_t *p, int idx) { field_desc_show(sb, *p, tp_la0); } static void tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) { if (idx) sbuf_printf(sb, "\n"); field_desc_show(sb, p[0], tp_la0); if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) field_desc_show(sb, p[1], tp_la0); } static void tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) { if (idx) sbuf_printf(sb, "\n"); field_desc_show(sb, p[0], tp_la0); if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); } static int sysctl_tp_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; uint64_t *buf, *p; int rc; u_int i, inc; void (*show_func)(struct sbuf *, uint64_t *, int); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { t4_tp_read_la(sc, buf, NULL); switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { case 2: inc = 2; show_func = tp_la_show2; break; case 3: inc = 2; show_func = tp_la_show3; break; default: inc = 1; show_func = tp_la_show; } } mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; p = buf; for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) (*show_func)(sb, p, i); rc = sbuf_finish(sb); done: sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_get_chan_txrate(sc, nrate, orate); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); if (sc->chip_params->nchan > 2) { sbuf_printf(sb, " channel 0 channel 1" " channel 2 channel 3\n"); sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", nrate[0], nrate[1], nrate[2], nrate[3]); sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", orate[0], orate[1], orate[2], orate[3]); } else { sbuf_printf(sb, " channel 0 channel 1\n"); sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", nrate[0], nrate[1]); sbuf_printf(sb, "Offload B/s: %10ju %10ju", orate[0], orate[1]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; uint32_t *buf, *p; int rc, i; rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else t4_ulprx_read_la(sc, buf); mtx_unlock(&sc->reg_lock); if (rc != 0) goto done; p = buf; sbuf_printf(sb, " Pcmd Type Message" " Data"); for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); } rc = sbuf_finish(sb); done: sbuf_delete(sb); free(buf, M_CXGBE); return (rc); } static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; struct sbuf *sb; int rc; uint32_t cfg, s1, s2; MPASS(chip_id(sc) >= CHELSIO_T5); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { cfg = t4_read_reg(sc, A_SGE_STAT_CFG); s1 = t4_read_reg(sc, A_SGE_STAT_TOTAL); s2 = t4_read_reg(sc, A_SGE_STAT_MATCH); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); if (G_STATSOURCE_T5(cfg) == 7) { int mode; mode = is_t5(sc) ? 
G_STATMODE(cfg) : G_T6_STATMODE(cfg); if (mode == 0) sbuf_printf(sb, "total %d, incomplete %d", s1, s2); else if (mode == 1) sbuf_printf(sb, "total %d, data overflow %d", s1, s2); else sbuf_printf(sb, "unknown mode %d", mode); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_cpus(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; enum cpu_sets op = arg2; cpuset_t cpuset; struct sbuf *sb; int i, rc; MPASS(op == LOCAL_CPUS || op == INTR_CPUS); CPU_ZERO(&cpuset); rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset); if (rc != 0) return (rc); rc = sysctl_wire_old_buffer(req, 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); CPU_FOREACH(i) sbuf_printf(sb, "%d ", i); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static int sysctl_reset(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; u_int val; int rc; val = sc->num_resets; rc = sysctl_handle_int(oidp, &val, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (val == 0) { /* Zero out the counter that tracks reset. */ sc->num_resets = 0; return (0); } if (val != 1) return (EINVAL); /* 0 or 1 are the only legal values */ if (hw_off_limits(sc)) /* harmless race */ return (EALREADY); taskqueue_enqueue(reset_tq, &sc->reset_task); return (0); } #ifdef TCP_OFFLOAD static int sysctl_tls(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int i, j, v, rc; struct vi_info *vi; v = sc->tt.tls; rc = sysctl_handle_int(oidp, &v, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS)) return (ENOTSUP); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4stls"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else { sc->tt.tls = !!v; for_each_port(sc, i) { for_each_vi(sc->port[i], j, vi) { if (vi->flags & VI_INIT_DONE) t4_update_fl_bufsize(vi->ifp); } } } end_synchronized_op(sc, 0); return (rc); } static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int *old_ports, *new_ports; int i, new_count, rc; if (req->newptr == NULL && req->oldptr == NULL) return (SYSCTL_OUT(req, NULL, imax(sc->tt.num_tls_rx_ports, 1) * sizeof(sc->tt.tls_rx_ports[0]))); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tlsrx"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } if (sc->tt.num_tls_rx_ports == 0) { i = -1; rc = SYSCTL_OUT(req, &i, sizeof(i)); } else rc = SYSCTL_OUT(req, sc->tt.tls_rx_ports, sc->tt.num_tls_rx_ports * sizeof(sc->tt.tls_rx_ports[0])); if (rc == 0 && req->newptr != NULL) { new_count = req->newlen / sizeof(new_ports[0]); new_ports = malloc(new_count * sizeof(new_ports[0]), M_CXGBE, M_WAITOK); rc = SYSCTL_IN(req, new_ports, new_count * sizeof(new_ports[0])); if (rc) goto err; /* Allow setting to a single '-1' to clear the list. 
*/ if (new_count == 1 && new_ports[0] == -1) { ADAPTER_LOCK(sc); old_ports = sc->tt.tls_rx_ports; sc->tt.tls_rx_ports = NULL; sc->tt.num_tls_rx_ports = 0; ADAPTER_UNLOCK(sc); free(old_ports, M_CXGBE); } else { for (i = 0; i < new_count; i++) { if (new_ports[i] < 1 || new_ports[i] > IPPORT_MAX) { rc = EINVAL; goto err; } } ADAPTER_LOCK(sc); old_ports = sc->tt.tls_rx_ports; sc->tt.tls_rx_ports = new_ports; sc->tt.num_tls_rx_ports = new_count; ADAPTER_UNLOCK(sc); free(old_ports, M_CXGBE); new_ports = NULL; } err: free(new_ports, M_CXGBE); } done: end_synchronized_op(sc, 0); return (rc); } static int sysctl_tls_rx_timeout(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int v, rc; v = sc->tt.tls_rx_timeout; rc = sysctl_handle_int(oidp, &v, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (v < 0) return (EINVAL); if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS)) return (ENOTSUP); sc->tt.tls_rx_timeout = v; return (0); } static void unit_conv(char *buf, size_t len, u_int val, u_int factor) { u_int rem = val % factor; if (rem == 0) snprintf(buf, len, "%u", val / factor); else { while (rem % 10 == 0) rem /= 10; snprintf(buf, len, "%u.%u", val / factor, rem); } } static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; char buf[16]; u_int res, re; u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) res = (u_int)-1; else res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); mtx_unlock(&sc->reg_lock); if (res == (u_int)-1) return (ENXIO); switch (arg2) { case 0: /* timer_tick */ re = G_TIMERRESOLUTION(res); break; case 1: /* TCP timestamp tick */ re = G_TIMESTAMPRESOLUTION(res); break; case 2: /* DACK tick */ re = G_DELAYEDACKRESOLUTION(res); break; default: return (EDOOFUS); } unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000); return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); } static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc; u_int dack_tmr, dack_re, v; u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; dack_re = G_DELAYEDACKRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); dack_tmr = t4_read_reg(sc, A_TP_DACK_TIMER); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); v = ((cclk_ps << dack_re) / 1000000) * dack_tmr; return (sysctl_handle_int(oidp, &v, 0, req)); } static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, reg = arg2; u_int tre; u_long tp_tick_us, v; u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX || reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX || reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL || reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); tp_tick_us = (cclk_ps << tre) / 1000000; if (reg == A_TP_INIT_SRTT) v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg)); else v = tp_tick_us * t4_read_reg(sc, reg); } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); else return (sysctl_handle_long(oidp, &v, 0, req)); } /* * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is * passed to this function. 
*/ static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, idx = arg2; u_int v; MPASS(idx >= 0 && idx <= 24); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf; } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); else return (sysctl_handle_int(oidp, &v, 0, req)); } static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS) { struct adapter *sc = arg1; int rc, idx = arg2; u_int shift, v, r; MPASS(idx >= 0 && idx < 16); r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3); shift = (idx & 3) << 3; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else { rc = 0; v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0; } mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); else return (sysctl_handle_int(oidp, &v, 0, req)); } static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int idx, rc, i; struct sge_ofld_rxq *ofld_rxq; uint8_t v; idx = vi->ofld_tmr_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < 0 || idx >= SGE_NTIMERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4otmr"); if (rc) return (rc); v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1); for_each_ofld_rxq(vi, i, ofld_rxq) { #ifdef atomic_store_rel_8 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); #else ofld_rxq->iq.intr_params = v; #endif } vi->ofld_tmr_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (0); } static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS) { struct vi_info *vi = arg1; struct adapter *sc = vi->adapter; int idx, rc; idx = vi->ofld_pktc_idx; rc = sysctl_handle_int(oidp, &idx, 0, req); if (rc != 0 || req->newptr == NULL) return (rc); if (idx < -1 || idx >= SGE_NCOUNTERS) return (EINVAL); rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, "t4opktc"); if (rc) return (rc); if (vi->flags & VI_INIT_DONE) rc = EBUSY; /* cannot be changed once the queues are created */ else vi->ofld_pktc_idx = idx; end_synchronized_op(sc, LOCK_HELD); return (rc); } #endif static int get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) { int rc; if (cntxt->cid > M_CTXTQID) return (EINVAL); if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) return (EINVAL); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } if (sc->flags & FW_OK) { rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); if (rc == 0) goto done; } /* * Read via firmware failed or wasn't even attempted. Read directly via * the backdoor. */ rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); done: end_synchronized_op(sc, 0); return (rc); } static int load_fw(struct adapter *sc, struct t4_data *fw) { int rc; uint8_t *fw_data; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } /* * The firmware, with the sole exception of the memory parity error * handler, runs from memory and not flash. It is almost always safe to * install a new firmware on a running system. Just set bit 1 in * hw.cxgbe.dflags or dev...dflags first. 
*/
*/ if (sc->flags & FULL_INIT_DONE && (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) { rc = EBUSY; goto done; } fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); rc = copyin(fw->data, fw_data, fw->len); if (rc == 0) rc = -t4_load_fw(sc, fw_data, fw->len); free(fw_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int load_cfg(struct adapter *sc, struct t4_data *cfg) { int rc; uint8_t *cfg_data = NULL; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } if (cfg->len == 0) { /* clear */ rc = -t4_load_cfg(sc, NULL, 0); goto done; } cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK); rc = copyin(cfg->data, cfg_data, cfg->len); if (rc == 0) rc = -t4_load_cfg(sc, cfg_data, cfg->len); free(cfg_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int load_boot(struct adapter *sc, struct t4_bootrom *br) { int rc; uint8_t *br_data = NULL; u_int offset; if (br->len > 1024 * 1024) return (EFBIG); if (br->pf_offset == 0) { /* pfidx */ if (br->pfidx_addr > 7) return (EINVAL); offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr, A_PCIE_PF_EXPROM_OFST))); } else if (br->pf_offset == 1) { /* offset */ offset = G_OFFSET(br->pfidx_addr); } else { return (EINVAL); } rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } if (br->len == 0) { /* clear */ rc = -t4_load_boot(sc, NULL, offset, 0); goto done; } br_data = malloc(br->len, M_CXGBE, M_WAITOK); rc = copyin(br->data, br_data, br->len); if (rc == 0) rc = -t4_load_boot(sc, br_data, offset, br->len); free(br_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int load_bootcfg(struct adapter *sc, struct t4_data *bc) { int rc; uint8_t *bc_data = NULL; rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); if (rc) return (rc); if (hw_off_limits(sc)) { rc = ENXIO; goto done; } if (bc->len == 0) { /* clear */ rc = -t4_load_bootcfg(sc, NULL, 0); goto done; } bc_data = malloc(bc->len, M_CXGBE, M_WAITOK); rc = copyin(bc->data, bc_data, bc->len); if (rc == 0) rc = -t4_load_bootcfg(sc, bc_data, bc->len); free(bc_data, M_CXGBE); done: end_synchronized_op(sc, 0); return (rc); } static int cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump) { int rc; struct cudbg_init *cudbg; void *handle, *buf; /* buf is large, don't block if no memory is available */ buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO); if (buf == NULL) return (ENOMEM); handle = cudbg_alloc_handle(); if (handle == NULL) { rc = ENOMEM; goto done; } cudbg = cudbg_get_init(handle); cudbg->adap = sc; cudbg->print = (cudbg_print_cb)printf; #ifndef notyet device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n", __func__, dump->wr_flash, dump->len, dump->data); #endif if (dump->wr_flash) cudbg->use_flash = 1; MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap)); memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap)); rc = cudbg_collect(handle, buf, &dump->len); if (rc != 0) goto done; rc = copyout(buf, dump->data, dump->len); done: cudbg_free_handle(handle); free(buf, M_CXGBE); return (rc); } static void free_offload_policy(struct t4_offload_policy *op) { struct offload_rule *r; int i; if (op == NULL) return; r = &op->rule[0]; for (i = 0; i < op->nrules; i++, r++) { free(r->bpf_prog.bf_insns, M_CXGBE); } free(op->rule, M_CXGBE); free(op, M_CXGBE); } static int set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop) { int i, rc, len; 
struct t4_offload_policy *op, *old; struct bpf_program *bf; const struct offload_settings *s; struct offload_rule *r; void *u; if (!is_offload(sc)) return (ENODEV); if (uop->nrules == 0) { /* Delete installed policies. */ op = NULL; goto set_policy; } else if (uop->nrules > 256) { /* arbitrary */ return (E2BIG); } /* Copy userspace offload policy to kernel */ op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK); op->nrules = uop->nrules; len = op->nrules * sizeof(struct offload_rule); op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); rc = copyin(uop->rule, op->rule, len); if (rc) { free(op->rule, M_CXGBE); free(op, M_CXGBE); return (rc); } r = &op->rule[0]; for (i = 0; i < op->nrules; i++, r++) { /* Validate open_type */ if (r->open_type != OPEN_TYPE_LISTEN && r->open_type != OPEN_TYPE_ACTIVE && r->open_type != OPEN_TYPE_PASSIVE && r->open_type != OPEN_TYPE_DONTCARE) { error: /* * Rules 0 to i have malloc'd filters that need to be * freed. Rules i+1 to nrules have userspace pointers * and should be left alone. */ op->nrules = i; free_offload_policy(op); return (rc); } /* Validate settings */ s = &r->settings; if ((s->offload != 0 && s->offload != 1) || s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED || s->sched_class < -1 || s->sched_class >= sc->chip_params->nsched_cls) { rc = EINVAL; goto error; } bf = &r->bpf_prog; u = bf->bf_insns; /* userspace ptr */ bf->bf_insns = NULL; if (bf->bf_len == 0) { /* legal, matches everything */ continue; } len = bf->bf_len * sizeof(*bf->bf_insns); bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); rc = copyin(u, bf->bf_insns, len); if (rc != 0) goto error; if (!bpf_validate(bf->bf_insns, bf->bf_len)) { rc = EINVAL; goto error; } } set_policy: rw_wlock(&sc->policy_lock); old = sc->policy; sc->policy = op; rw_wunlock(&sc->policy_lock); free_offload_policy(old); return (0); } #define MAX_READ_BUF_SIZE (128 * 1024) static int read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) { uint32_t addr, remaining, n; uint32_t *buf; int rc; uint8_t *dst; mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else rc = validate_mem_range(sc, mr->addr, mr->len); mtx_unlock(&sc->reg_lock); if (rc != 0) return (rc); buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); addr = mr->addr; remaining = mr->len; dst = (void *)mr->data; while (remaining) { n = min(remaining, MAX_READ_BUF_SIZE); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else read_via_memwin(sc, 2, addr, buf, n); mtx_unlock(&sc->reg_lock); if (rc != 0) break; rc = copyout(buf, dst, n); if (rc != 0) break; dst += n; remaining -= n; addr += n; } free(buf, M_CXGBE); return (rc); } #undef MAX_READ_BUF_SIZE static int read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) { int rc; if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) return (EINVAL); if (i2cd->len > sizeof(i2cd->data)) return (EFBIG); rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); if (rc) return (rc); if (hw_off_limits(sc)) rc = ENXIO; else rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, i2cd->offset, i2cd->len, &i2cd->data[0]); end_synchronized_op(sc, 0); return (rc); } static int clear_stats(struct adapter *sc, u_int port_id) { int i, v, chan_map; struct port_info *pi; struct vi_info *vi; struct sge_rxq *rxq; struct sge_txq *txq; struct sge_wrq *wrq; #if defined(TCP_OFFLOAD) || defined(RATELIMIT) struct sge_ofld_txq *ofld_txq; #endif #ifdef TCP_OFFLOAD struct sge_ofld_rxq *ofld_rxq; #endif if (port_id >= sc->params.nports) return (EINVAL); pi = 
sc->port[port_id]; if (pi == NULL) return (EIO); mtx_lock(&sc->reg_lock); if (!hw_off_limits(sc)) { /* MAC stats */ t4_clr_port_stats(sc, pi->tx_chan); if (is_t6(sc)) { if (pi->fcs_reg != -1) pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg); else pi->stats.rx_fcs_err = 0; } for_each_vi(pi, v, vi) { if (vi->flags & VI_INIT_DONE) t4_clr_vi_stats(sc, vi->vin); } chan_map = pi->rx_e_chan_map; v = 0; /* reuse */ while (chan_map) { i = ffs(chan_map) - 1; t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1, A_TP_MIB_TNL_CNG_DROP_0 + i); chan_map &= ~(1 << i); } } mtx_unlock(&sc->reg_lock); pi->tx_parse_error = 0; pi->tnl_cong_drops = 0; /* * Since this command accepts a port, clear stats for * all VIs on this port. */ for_each_vi(pi, v, vi) { if (vi->flags & VI_INIT_DONE) { for_each_rxq(vi, i, rxq) { #if defined(INET) || defined(INET6) rxq->lro.lro_queued = 0; rxq->lro.lro_flushed = 0; #endif rxq->rxcsum = 0; rxq->vlan_extraction = 0; rxq->vxlan_rxcsum = 0; rxq->fl.cl_allocated = 0; rxq->fl.cl_recycled = 0; rxq->fl.cl_fast_recycled = 0; } for_each_txq(vi, i, txq) { txq->txcsum = 0; txq->tso_wrs = 0; txq->vlan_insertion = 0; txq->imm_wrs = 0; txq->sgl_wrs = 0; txq->txpkt_wrs = 0; txq->txpkts0_wrs = 0; txq->txpkts1_wrs = 0; txq->txpkts0_pkts = 0; txq->txpkts1_pkts = 0; txq->txpkts_flush = 0; txq->raw_wrs = 0; txq->vxlan_tso_wrs = 0; txq->vxlan_txcsum = 0; txq->kern_tls_records = 0; txq->kern_tls_short = 0; txq->kern_tls_partial = 0; txq->kern_tls_full = 0; txq->kern_tls_octets = 0; txq->kern_tls_waste = 0; txq->kern_tls_options = 0; txq->kern_tls_header = 0; txq->kern_tls_fin = 0; txq->kern_tls_fin_short = 0; txq->kern_tls_cbc = 0; txq->kern_tls_gcm = 0; mp_ring_reset_stats(txq->r); } #if defined(TCP_OFFLOAD) || defined(RATELIMIT) for_each_ofld_txq(vi, i, ofld_txq) { ofld_txq->wrq.tx_wrs_direct = 0; ofld_txq->wrq.tx_wrs_copied = 0; counter_u64_zero(ofld_txq->tx_iscsi_pdus); counter_u64_zero(ofld_txq->tx_iscsi_octets); counter_u64_zero(ofld_txq->tx_toe_tls_records); counter_u64_zero(ofld_txq->tx_toe_tls_octets); } #endif #ifdef TCP_OFFLOAD for_each_ofld_rxq(vi, i, ofld_rxq) { ofld_rxq->fl.cl_allocated = 0; ofld_rxq->fl.cl_recycled = 0; ofld_rxq->fl.cl_fast_recycled = 0; counter_u64_zero( ofld_rxq->rx_iscsi_ddp_setup_ok); counter_u64_zero( ofld_rxq->rx_iscsi_ddp_setup_error); ofld_rxq->rx_iscsi_ddp_pdus = 0; ofld_rxq->rx_iscsi_ddp_octets = 0; ofld_rxq->rx_iscsi_fl_pdus = 0; ofld_rxq->rx_iscsi_fl_octets = 0; ofld_rxq->rx_toe_tls_records = 0; ofld_rxq->rx_toe_tls_octets = 0; } #endif if (IS_MAIN_VI(vi)) { wrq = &sc->sge.ctrlq[pi->port_id]; wrq->tx_wrs_direct = 0; wrq->tx_wrs_copied = 0; } } } return (0); } static int hold_clip_addr(struct adapter *sc, struct t4_clip_addr *ca) { #ifdef INET6 struct in6_addr in6; bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr)); if (t4_get_clip_entry(sc, &in6, true) != NULL) return (0); else return (EIO); #else return (ENOTSUP); #endif } static int release_clip_addr(struct adapter *sc, struct t4_clip_addr *ca) { #ifdef INET6 struct in6_addr in6; bcopy(&ca->addr[0], &in6.s6_addr[0], sizeof(in6.s6_addr)); return (t4_release_clip_addr(sc, &in6)); #else return (ENOTSUP); #endif } int t4_os_find_pci_capability(struct adapter *sc, int cap) { int i; return (pci_find_cap(sc->dev, cap, &i) == 0 ? 
i : 0); } int t4_os_pci_save_state(struct adapter *sc) { device_t dev; struct pci_devinfo *dinfo; dev = sc->dev; dinfo = device_get_ivars(dev); pci_cfg_save(dev, dinfo, 0); return (0); } int t4_os_pci_restore_state(struct adapter *sc) { device_t dev; struct pci_devinfo *dinfo; dev = sc->dev; dinfo = device_get_ivars(dev); pci_cfg_restore(dev, dinfo); return (0); } void t4_os_portmod_changed(struct port_info *pi) { struct adapter *sc = pi->adapter; struct vi_info *vi; struct ifnet *ifp; static const char *mod_str[] = { NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" }; KASSERT((pi->flags & FIXED_IFMEDIA) == 0, ("%s: port_type %u", __func__, pi->port_type)); vi = &pi->vi[0]; if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) { PORT_LOCK(pi); build_medialist(pi); if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) { fixup_link_config(pi); apply_link_config(pi); } PORT_UNLOCK(pi); end_synchronized_op(sc, LOCK_HELD); } ifp = vi->ifp; if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) if_printf(ifp, "transceiver unplugged.\n"); else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) if_printf(ifp, "unknown transceiver inserted.\n"); else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) if_printf(ifp, "unsupported transceiver inserted.\n"); else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { if_printf(ifp, "%dGbps %s transceiver inserted.\n", port_top_speed(pi), mod_str[pi->mod_type]); } else { if_printf(ifp, "transceiver (type %d) inserted.\n", pi->mod_type); } } void t4_os_link_changed(struct port_info *pi) { struct vi_info *vi; struct ifnet *ifp; struct link_config *lc = &pi->link_cfg; struct adapter *sc = pi->adapter; int v; PORT_LOCK_ASSERT_OWNED(pi); if (is_t6(sc)) { if (lc->link_ok) { if (lc->speed > 25000 || (lc->speed == 25000 && lc->fec == FEC_RS)) { pi->fcs_reg = T5_PORT_REG(pi->tx_chan, A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS); } else { pi->fcs_reg = T5_PORT_REG(pi->tx_chan, A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS); } pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg); pi->stats.rx_fcs_err = 0; } else { pi->fcs_reg = -1; } } else { MPASS(pi->fcs_reg != -1); MPASS(pi->fcs_base == 0); } for_each_vi(pi, v, vi) { ifp = vi->ifp; if (ifp == NULL) continue; if (lc->link_ok) { ifp->if_baudrate = IF_Mbps(lc->speed); if_link_state_change(ifp, LINK_STATE_UP); } else { if_link_state_change(ifp, LINK_STATE_DOWN); } } } void t4_iterate(void (*func)(struct adapter *, void *), void *arg) { struct adapter *sc; sx_slock(&t4_list_lock); SLIST_FOREACH(sc, &t4_list, link) { /* * func should not make any assumptions about what state sc is * in - the only guarantee is that sc->sc_lock is a valid lock. 
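 *
 * A minimal callback (hypothetical name, for illustration only) looks
 * like this; t4_iterate(count_adapters, &n) then invokes it once per
 * adapter on the list:
 *
 *	static void
 *	count_adapters(struct adapter *sc, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}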
*/ func(sc, arg); } sx_sunlock(&t4_list_lock); } static int t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, struct thread *td) { int rc; struct adapter *sc = dev->si_drv1; rc = priv_check(td, PRIV_DRIVER); if (rc != 0) return (rc); switch (cmd) { case CHELSIO_T4_GETREG: { struct t4_reg *edata = (struct t4_reg *)data; if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) return (EFAULT); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else if (edata->size == 4) edata->val = t4_read_reg(sc, edata->addr); else if (edata->size == 8) edata->val = t4_read_reg64(sc, edata->addr); else rc = EINVAL; mtx_unlock(&sc->reg_lock); break; } case CHELSIO_T4_SETREG: { struct t4_reg *edata = (struct t4_reg *)data; if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) return (EFAULT); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else if (edata->size == 4) { if (edata->val & 0xffffffff00000000) rc = EINVAL; t4_write_reg(sc, edata->addr, (uint32_t) edata->val); } else if (edata->size == 8) t4_write_reg64(sc, edata->addr, edata->val); else rc = EINVAL; mtx_unlock(&sc->reg_lock); break; } case CHELSIO_T4_REGDUMP: { struct t4_regdump *regs = (struct t4_regdump *)data; int reglen = t4_get_regs_len(sc); uint8_t *buf; if (regs->len < reglen) { regs->len = reglen; /* hint to the caller */ return (ENOBUFS); } regs->len = reglen; buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); mtx_lock(&sc->reg_lock); if (hw_off_limits(sc)) rc = ENXIO; else get_regs(sc, regs, buf); mtx_unlock(&sc->reg_lock); if (rc == 0) rc = copyout(buf, regs->data, reglen); free(buf, M_CXGBE); break; } case CHELSIO_T4_GET_FILTER_MODE: rc = get_filter_mode(sc, (uint32_t *)data); break; case CHELSIO_T4_SET_FILTER_MODE: rc = set_filter_mode(sc, *(uint32_t *)data); break; case CHELSIO_T4_SET_FILTER_MASK: rc = set_filter_mask(sc, *(uint32_t *)data); break; case CHELSIO_T4_GET_FILTER: rc = get_filter(sc, (struct t4_filter *)data); break; case CHELSIO_T4_SET_FILTER: rc = set_filter(sc, (struct t4_filter *)data); break; case CHELSIO_T4_DEL_FILTER: rc = del_filter(sc, (struct t4_filter *)data); break; case CHELSIO_T4_GET_SGE_CONTEXT: rc = get_sge_context(sc, (struct t4_sge_context *)data); break; case CHELSIO_T4_LOAD_FW: rc = load_fw(sc, (struct t4_data *)data); break; case CHELSIO_T4_GET_MEM: rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); break; case CHELSIO_T4_GET_I2C: rc = read_i2c(sc, (struct t4_i2c_data *)data); break; case CHELSIO_T4_CLEAR_STATS: rc = clear_stats(sc, *(uint32_t *)data); break; case CHELSIO_T4_SCHED_CLASS: rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); break; case CHELSIO_T4_SCHED_QUEUE: rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); break; case CHELSIO_T4_GET_TRACER: rc = t4_get_tracer(sc, (struct t4_tracer *)data); break; case CHELSIO_T4_SET_TRACER: rc = t4_set_tracer(sc, (struct t4_tracer *)data); break; case CHELSIO_T4_LOAD_CFG: rc = load_cfg(sc, (struct t4_data *)data); break; case CHELSIO_T4_LOAD_BOOT: rc = load_boot(sc, (struct t4_bootrom *)data); break; case CHELSIO_T4_LOAD_BOOTCFG: rc = load_bootcfg(sc, (struct t4_data *)data); break; case CHELSIO_T4_CUDBG_DUMP: rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data); break; case CHELSIO_T4_SET_OFLD_POLICY: rc = set_offload_policy(sc, (struct t4_offload_policy *)data); break; case CHELSIO_T4_HOLD_CLIP_ADDR: rc = hold_clip_addr(sc, (struct t4_clip_addr *)data); break; case CHELSIO_T4_RELEASE_CLIP_ADDR: rc = release_clip_addr(sc, (struct t4_clip_addr *)data); break; default: rc = 
ENOTTY; } return (rc); } #ifdef TCP_OFFLOAD static int toe_capability(struct vi_info *vi, bool enable) { int rc; struct port_info *pi = vi->pi; struct adapter *sc = pi->adapter; ASSERT_SYNCHRONIZED_OP(sc); if (!is_offload(sc)) return (ENODEV); if (hw_off_limits(sc)) return (ENXIO); if (enable) { #ifdef KERN_TLS if (sc->flags & KERN_TLS_ON) { int i, j, n; struct port_info *p; struct vi_info *v; /* * Reconfigure hardware for TOE if TXTLS is not enabled * on any ifnet. */ n = 0; for_each_port(sc, i) { p = sc->port[i]; for_each_vi(p, j, v) { if (v->ifp->if_capenable & IFCAP_TXTLS) { CH_WARN(sc, "%s has NIC TLS enabled.\n", device_get_nameunit(v->dev)); n++; } } } if (n > 0) { CH_WARN(sc, "Disable NIC TLS on all interfaces " "associated with this adapter before " "trying to enable TOE.\n"); return (EAGAIN); } rc = t4_config_kern_tls(sc, false); if (rc) return (rc); } #endif if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) { /* TOE is already enabled. */ return (0); } /* * We need the port's queues around so that we're able to send * and receive CPLs to/from the TOE even if the ifnet for this * port has never been UP'd administratively. */ if (!(vi->flags & VI_INIT_DONE) && ((rc = vi_init(vi)) != 0)) return (rc); if (!(pi->vi[0].flags & VI_INIT_DONE) && ((rc = vi_init(&pi->vi[0])) != 0)) return (rc); if (isset(&sc->offload_map, pi->port_id)) { /* TOE is enabled on another VI of this port. */ pi->uld_vis++; return (0); } if (!uld_active(sc, ULD_TOM)) { rc = t4_activate_uld(sc, ULD_TOM); if (rc == EAGAIN) { log(LOG_WARNING, "You must kldload t4_tom.ko before trying " "to enable TOE on a cxgbe interface.\n"); } if (rc != 0) return (rc); KASSERT(sc->tom_softc != NULL, ("%s: TOM activated but softc NULL", __func__)); KASSERT(uld_active(sc, ULD_TOM), ("%s: TOM activated but flag not set", __func__)); } /* Activate iWARP and iSCSI too, if the modules are loaded. */ if (!uld_active(sc, ULD_IWARP)) (void) t4_activate_uld(sc, ULD_IWARP); if (!uld_active(sc, ULD_ISCSI)) (void) t4_activate_uld(sc, ULD_ISCSI); pi->uld_vis++; setbit(&sc->offload_map, pi->port_id); } else { pi->uld_vis--; if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0) return (0); KASSERT(uld_active(sc, ULD_TOM), ("%s: TOM never initialized?", __func__)); clrbit(&sc->offload_map, pi->port_id); } return (0); } /* * Add an upper layer driver to the global list. */ int t4_register_uld(struct uld_info *ui) { int rc = 0; struct uld_info *u; sx_xlock(&t4_uld_list_lock); SLIST_FOREACH(u, &t4_uld_list, link) { if (u->uld_id == ui->uld_id) { rc = EEXIST; goto done; } } SLIST_INSERT_HEAD(&t4_uld_list, ui, link); ui->refcount = 0; done: sx_xunlock(&t4_uld_list_lock); return (rc); } int t4_unregister_uld(struct uld_info *ui) { int rc = EINVAL; struct uld_info *u; sx_xlock(&t4_uld_list_lock); SLIST_FOREACH(u, &t4_uld_list, link) { if (u == ui) { if (ui->refcount > 0) { rc = EBUSY; goto done; } SLIST_REMOVE(&t4_uld_list, ui, uld_info, link); rc = 0; goto done; } } done: sx_xunlock(&t4_uld_list_lock); return (rc); } int t4_activate_uld(struct adapter *sc, int id) { int rc; struct uld_info *ui; ASSERT_SYNCHRONIZED_OP(sc); if (id < 0 || id > ULD_MAX) return (EINVAL); rc = EAGAIN; /* kldload the module with this ULD and try again.
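 * For ULD_TOM, for instance, that means running "kldload t4_tom", as
 * the LOG_WARNING message in toe_capability() above spells out.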
*/ sx_slock(&t4_uld_list_lock); SLIST_FOREACH(ui, &t4_uld_list, link) { if (ui->uld_id == id) { if (!(sc->flags & FULL_INIT_DONE)) { rc = adapter_init(sc); if (rc != 0) break; } rc = ui->activate(sc); if (rc == 0) { setbit(&sc->active_ulds, id); ui->refcount++; } break; } } sx_sunlock(&t4_uld_list_lock); return (rc); } int t4_deactivate_uld(struct adapter *sc, int id) { int rc; struct uld_info *ui; ASSERT_SYNCHRONIZED_OP(sc); if (id < 0 || id > ULD_MAX) return (EINVAL); rc = ENXIO; sx_slock(&t4_uld_list_lock); SLIST_FOREACH(ui, &t4_uld_list, link) { if (ui->uld_id == id) { rc = ui->deactivate(sc); if (rc == 0) { clrbit(&sc->active_ulds, id); ui->refcount--; } break; } } sx_sunlock(&t4_uld_list_lock); return (rc); } static void t4_async_event(void *arg, int n) { struct uld_info *ui; struct adapter *sc = (struct adapter *)arg; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4async") != 0) return; sx_slock(&t4_uld_list_lock); SLIST_FOREACH(ui, &t4_uld_list, link) { if (ui->uld_id == ULD_IWARP) { ui->async_event(sc); break; } } sx_sunlock(&t4_uld_list_lock); end_synchronized_op(sc, 0); } int uld_active(struct adapter *sc, int uld_id) { MPASS(uld_id >= 0 && uld_id <= ULD_MAX); return (isset(&sc->active_ulds, uld_id)); } #endif #ifdef KERN_TLS static int ktls_capability(struct adapter *sc, bool enable) { ASSERT_SYNCHRONIZED_OP(sc); if (!is_ktls(sc)) return (ENODEV); if (hw_off_limits(sc)) return (ENXIO); if (enable) { if (sc->flags & KERN_TLS_ON) return (0); /* already on */ if (sc->offload_map != 0) { CH_WARN(sc, "Disable TOE on all interfaces associated with " "this adapter before trying to enable NIC TLS.\n"); return (EAGAIN); } return (t4_config_kern_tls(sc, true)); } else { /* * Nothing to do for disable. If TOE is enabled sometime later * then toe_capability will reconfigure the hardware. */ return (0); } } #endif /* * t = ptr to tunable. * nc = number of CPUs. * c = compiled-in default for that tunable. */ static void calculate_nqueues(int *t, int nc, const int c) { int nq; if (*t > 0) return; nq = *t < 0 ? -*t : c; *t = min(nc, nq); } /* * Come up with reasonable defaults for some of the tunables, provided they're * not set by the user (in which case we'll use the values as is).
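 *
 * A worked example (CPU count and compiled-in default illustrative):
 * with 8 CPUs, NTXQ = 16, and t4_ntxq left unset, calculate_nqueues()
 * yields t4_ntxq = min(8, 16) = 8; a negative tunable such as -4 would
 * instead request min(8, 4) = 4 queues.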
*/ static void tweak_tunables(void) { int nc = mp_ncpus; /* our snapshot of the number of CPUs */ if (t4_ntxq < 1) { #ifdef RSS t4_ntxq = rss_getnumbuckets(); #else calculate_nqueues(&t4_ntxq, nc, NTXQ); #endif } calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI); if (t4_nrxq < 1) { #ifdef RSS t4_nrxq = rss_getnumbuckets(); #else calculate_nqueues(&t4_nrxq, nc, NRXQ); #endif } calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI); #if defined(TCP_OFFLOAD) || defined(RATELIMIT) calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ); calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI); #endif #ifdef TCP_OFFLOAD calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ); calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI); #endif #if defined(TCP_OFFLOAD) || defined(KERN_TLS) if (t4_toecaps_allowed == -1) t4_toecaps_allowed = FW_CAPS_CONFIG_TOE; #else if (t4_toecaps_allowed == -1) t4_toecaps_allowed = 0; #endif #ifdef TCP_OFFLOAD if (t4_rdmacaps_allowed == -1) { t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP | FW_CAPS_CONFIG_RDMA_RDMAC; } if (t4_iscsicaps_allowed == -1) { t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU | FW_CAPS_CONFIG_ISCSI_TARGET_PDU | FW_CAPS_CONFIG_ISCSI_T10DIF; } if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS) t4_tmr_idx_ofld = TMR_IDX_OFLD; if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS) t4_pktc_idx_ofld = PKTC_IDX_OFLD; if (t4_toe_tls_rx_timeout < 0) t4_toe_tls_rx_timeout = 0; #else if (t4_rdmacaps_allowed == -1) t4_rdmacaps_allowed = 0; if (t4_iscsicaps_allowed == -1) t4_iscsicaps_allowed = 0; #endif #ifdef DEV_NETMAP calculate_nqueues(&t4_nnmtxq, nc, NNMTXQ); calculate_nqueues(&t4_nnmrxq, nc, NNMRXQ); calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI); calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI); #endif if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS) t4_tmr_idx = TMR_IDX; if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS) t4_pktc_idx = PKTC_IDX; if (t4_qsize_txq < 128) t4_qsize_txq = 128; if (t4_qsize_rxq < 128) t4_qsize_rxq = 128; while (t4_qsize_rxq & 7) t4_qsize_rxq++; t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX; /* * Number of VIs to create per-port. The first VI is the "main" regular * VI for the port. The rest are additional virtual interfaces on the * same physical port. Note that the main VI does not have native * netmap support but the extra VIs do. * * Limit the number of VIs per port to the number of available * MAC addresses per port. 
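 *
 * e.g. (count illustrative) if nitems(vi_mac_funcs) == 4, a setting of
 * hw.cxgbe.num_vis=6 is trimmed to 4 below and a message is printed.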
*/ if (t4_num_vis < 1) t4_num_vis = 1; if (t4_num_vis > nitems(vi_mac_funcs)) { t4_num_vis = nitems(vi_mac_funcs); printf("cxgbe: number of VIs limited to %d\n", t4_num_vis); } if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) { pcie_relaxed_ordering = 1; #if defined(__i386__) || defined(__amd64__) if (cpu_vendor_id == CPU_VENDOR_INTEL) pcie_relaxed_ordering = 0; #endif } } #ifdef DDB static void t4_dump_tcb(struct adapter *sc, int tid) { uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos; reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2); save = t4_read_reg(sc, reg); base = sc->memwin[2].mw_base; /* Dump TCB for the tid */ tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE); tcb_addr += tid * TCB_SIZE; if (is_t4(sc)) { pf = 0; win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */ } else { pf = V_PFNUM(sc->pf); win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */ } t4_write_reg(sc, reg, win_pos | pf); t4_read_reg(sc, reg); off = tcb_addr - win_pos; for (i = 0; i < 4; i++) { uint32_t buf[8]; for (j = 0; j < 8; j++, off += 4) buf[j] = htonl(t4_read_reg(sc, base + off)); db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); } t4_write_reg(sc, reg, save); t4_read_reg(sc, reg); } static void t4_dump_devlog(struct adapter *sc) { struct devlog_params *dparams = &sc->params.devlog; struct fw_devlog_e e; int i, first, j, m, nentries, rc; uint64_t ftstamp = UINT64_MAX; if (dparams->start == 0) { db_printf("devlog params not valid\n"); return; } nentries = dparams->size / sizeof(struct fw_devlog_e); m = fwmtype_to_hwmtype(dparams->memtype); /* Find the first entry. */ first = -1; for (i = 0; i < nentries && !db_pager_quit; i++) { rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), sizeof(e), (void *)&e); if (rc != 0) break; if (e.timestamp == 0) break; e.timestamp = be64toh(e.timestamp); if (e.timestamp < ftstamp) { ftstamp = e.timestamp; first = i; } } if (first == -1) return; i = first; do { rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), sizeof(e), (void *)&e); if (rc != 0) return; if (e.timestamp == 0) return; e.timestamp = be64toh(e.timestamp); e.seqno = be32toh(e.seqno); for (j = 0; j < 8; j++) e.params[j] = be32toh(e.params[j]); db_printf("%10d %15ju %8s %8s ", e.seqno, e.timestamp, (e.level < nitems(devlog_level_strings) ? devlog_level_strings[e.level] : "UNKNOWN"), (e.facility < nitems(devlog_facility_strings) ? 
devlog_facility_strings[e.facility] : "UNKNOWN")); db_printf(e.fmt, e.params[0], e.params[1], e.params[2], e.params[3], e.params[4], e.params[5], e.params[6], e.params[7]); if (++i == nentries) i = 0; } while (i != first && !db_pager_quit); } static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table); _DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table); DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL) { device_t dev; int t; bool valid; valid = false; t = db_read_token(); if (t == tIDENT) { dev = device_lookup_by_name(db_tok_string); valid = true; } db_skip_to_eol(); if (!valid) { db_printf("usage: show t4 devlog <nexus>\n"); return; } if (dev == NULL) { db_printf("device not found\n"); return; } t4_dump_devlog(device_get_softc(dev)); } DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL) { device_t dev; int radix, tid, t; bool valid; valid = false; radix = db_radix; db_radix = 10; t = db_read_token(); if (t == tIDENT) { dev = device_lookup_by_name(db_tok_string); t = db_read_token(); if (t == tNUMBER) { tid = db_tok_number; valid = true; } } db_radix = radix; db_skip_to_eol(); if (!valid) { db_printf("usage: show t4 tcb <nexus> <tid>\n"); return; } if (dev == NULL) { db_printf("device not found\n"); return; } if (tid < 0) { db_printf("invalid tid\n"); return; } t4_dump_tcb(device_get_softc(dev), tid); } #endif static eventhandler_tag vxlan_start_evtag; static eventhandler_tag vxlan_stop_evtag; struct vxlan_evargs { struct ifnet *ifp; uint16_t port; }; static void enable_vxlan_rx(struct adapter *sc) { int i, rc; struct port_info *pi; uint8_t match_all_mac[ETHER_ADDR_LEN] = {0}; ASSERT_SYNCHRONIZED_OP(sc); t4_write_reg(sc, A_MPS_RX_VXLAN_TYPE, V_VXLAN(sc->vxlan_port) | F_VXLAN_EN); for_each_port(sc, i) { pi = sc->port[i]; if (pi->vxlan_tcam_entry == true) continue; rc = t4_alloc_raw_mac_filt(sc, pi->vi[0].viid, match_all_mac, match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id, true); if (rc < 0) { rc = -rc; CH_ERR(&pi->vi[0], "failed to add VXLAN TCAM entry: %d.\n", rc); } else { MPASS(rc == sc->rawf_base + pi->port_id); pi->vxlan_tcam_entry = true; } } } static void t4_vxlan_start(struct adapter *sc, void *arg) { struct vxlan_evargs *v = arg; if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5) return; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxst") != 0) return; if (sc->vxlan_refcount == 0) { sc->vxlan_port = v->port; sc->vxlan_refcount = 1; if (!hw_off_limits(sc)) enable_vxlan_rx(sc); } else if (sc->vxlan_port == v->port) { sc->vxlan_refcount++; } else { CH_ERR(sc, "VXLAN already configured on port %d; " "ignoring attempt to configure it on port %d\n", sc->vxlan_port, v->port); } end_synchronized_op(sc, 0); } static void t4_vxlan_stop(struct adapter *sc, void *arg) { struct vxlan_evargs *v = arg; if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5) return; if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxsp") != 0) return; /* * VXLANs may have been configured before the driver was loaded so we * may see more stops than starts. This is not handled cleanly but at * least we keep the refcount sane.
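 *
 * e.g. (port number illustrative) a stop for UDP port 4789 with
 * vxlan_refcount == 0 is only logged, while start, start, stop on that
 * same port leaves the MPS match enabled with vxlan_refcount == 1.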
*/ if (sc->vxlan_port != v->port) goto done; if (sc->vxlan_refcount == 0) { CH_ERR(sc, "VXLAN operation on port %d was stopped earlier; " "ignoring attempt to stop it again.\n", sc->vxlan_port); } else if (--sc->vxlan_refcount == 0 && !hw_off_limits(sc)) t4_set_reg_field(sc, A_MPS_RX_VXLAN_TYPE, F_VXLAN_EN, 0); done: end_synchronized_op(sc, 0); } static void t4_vxlan_start_handler(void *arg __unused, struct ifnet *ifp, sa_family_t family, u_int port) { struct vxlan_evargs v; MPASS(family == AF_INET || family == AF_INET6); v.ifp = ifp; v.port = port; t4_iterate(t4_vxlan_start, &v); } static void t4_vxlan_stop_handler(void *arg __unused, struct ifnet *ifp, sa_family_t family, u_int port) { struct vxlan_evargs v; MPASS(family == AF_INET || family == AF_INET6); v.ifp = ifp; v.port = port; t4_iterate(t4_vxlan_stop, &v); } static struct sx mlu; /* mod load unload */ SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload"); static int mod_event(module_t mod, int cmd, void *arg) { int rc = 0; static int loaded = 0; switch (cmd) { case MOD_LOAD: sx_xlock(&mlu); if (loaded++ == 0) { t4_sge_modload(); t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, t4_filter_rpl, CPL_COOKIE_FILTER); t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl, CPL_COOKIE_FILTER); t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER); t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER); t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS, t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER); t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt); t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt); t4_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl); sx_init(&t4_list_lock, "T4/T5 adapters"); SLIST_INIT(&t4_list); callout_init(&fatal_callout, 1); #ifdef TCP_OFFLOAD sx_init(&t4_uld_list_lock, "T4/T5 ULDs"); SLIST_INIT(&t4_uld_list); #endif #ifdef INET6 t4_clip_modload(); #endif #ifdef KERN_TLS t6_ktls_modload(); #endif t4_tracer_modload(); tweak_tunables(); vxlan_start_evtag = EVENTHANDLER_REGISTER(vxlan_start, t4_vxlan_start_handler, NULL, EVENTHANDLER_PRI_ANY); vxlan_stop_evtag = EVENTHANDLER_REGISTER(vxlan_stop, t4_vxlan_stop_handler, NULL, EVENTHANDLER_PRI_ANY); reset_tq = taskqueue_create("t4_rst_tq", M_WAITOK, taskqueue_thread_enqueue, &reset_tq); taskqueue_start_threads(&reset_tq, 1, PI_SOFT, "t4_rst_thr"); } sx_xunlock(&mlu); break; case MOD_UNLOAD: sx_xlock(&mlu); if (--loaded == 0) { int tries; taskqueue_free(reset_tq); sx_slock(&t4_list_lock); if (!SLIST_EMPTY(&t4_list)) { rc = EBUSY; sx_sunlock(&t4_list_lock); goto done_unload; } #ifdef TCP_OFFLOAD sx_slock(&t4_uld_list_lock); if (!SLIST_EMPTY(&t4_uld_list)) { rc = EBUSY; sx_sunlock(&t4_uld_list_lock); sx_sunlock(&t4_list_lock); goto done_unload; } #endif tries = 0; while (tries++ < 5 && t4_sge_extfree_refs() != 0) { uprintf("%ju clusters with custom free routine " "still in use.\n", t4_sge_extfree_refs()); pause("t4unload", 2 * hz); } #ifdef TCP_OFFLOAD sx_sunlock(&t4_uld_list_lock); #endif sx_sunlock(&t4_list_lock); if (t4_sge_extfree_refs() == 0) { EVENTHANDLER_DEREGISTER(vxlan_start, vxlan_start_evtag); EVENTHANDLER_DEREGISTER(vxlan_stop, vxlan_stop_evtag); t4_tracer_modunload(); #ifdef KERN_TLS t6_ktls_modunload(); #endif #ifdef INET6 t4_clip_modunload(); #endif #ifdef TCP_OFFLOAD sx_destroy(&t4_uld_list_lock); #endif sx_destroy(&t4_list_lock); t4_sge_modunload(); loaded = 0; } else { rc = EBUSY; loaded++; /* undo earlier decrement */ } } done_unload: sx_xunlock(&mlu); 
break; } return (rc); } static devclass_t t4_devclass, t5_devclass, t6_devclass; static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass; static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass; DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0); MODULE_VERSION(t4nex, 1); MODULE_DEPEND(t4nex, firmware, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(t4nex, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0); MODULE_VERSION(t5nex, 1); MODULE_DEPEND(t5nex, firmware, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(t5nex, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0); MODULE_VERSION(t6nex, 1); MODULE_DEPEND(t6nex, firmware, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(t6nex, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); MODULE_VERSION(cxgbe, 1); DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0); MODULE_VERSION(cxl, 1); DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0); MODULE_VERSION(cc, 1); DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0); MODULE_VERSION(vcxgbe, 1); DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0); MODULE_VERSION(vcxl, 1); DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0); MODULE_VERSION(vcc, 1); diff --git a/sys/dev/fdt/simplebus.c b/sys/dev/fdt/simplebus.c index 4496d2caaad7..76814ed40b04 100644 --- a/sys/dev/fdt/simplebus.c +++ b/sys/dev/fdt/simplebus.c @@ -1,459 +1,459 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include /* * Bus interface. 
*/ static int simplebus_probe(device_t dev); static struct resource *simplebus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static void simplebus_probe_nomatch(device_t bus, device_t child); static int simplebus_print_child(device_t bus, device_t child); static device_t simplebus_add_child(device_t dev, u_int order, const char *name, int unit); static struct resource_list *simplebus_get_resource_list(device_t bus, device_t child); /* * ofw_bus interface */ static const struct ofw_bus_devinfo *simplebus_get_devinfo(device_t bus, device_t child); /* * Driver methods. */ static device_method_t simplebus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, simplebus_probe), DEVMETHOD(device_attach, simplebus_attach), DEVMETHOD(device_detach, simplebus_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, simplebus_add_child), DEVMETHOD(bus_print_child, simplebus_print_child), DEVMETHOD(bus_probe_nomatch, simplebus_probe_nomatch), DEVMETHOD(bus_read_ivar, bus_generic_read_ivar), DEVMETHOD(bus_write_ivar, bus_generic_write_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, simplebus_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), - DEVMETHOD(bus_child_pnpinfo_str, ofw_bus_gen_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_get_resource_list, simplebus_get_resource_list), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, simplebus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; DEFINE_CLASS_0(simplebus, simplebus_driver, simplebus_methods, sizeof(struct simplebus_softc)); static devclass_t simplebus_devclass; EARLY_DRIVER_MODULE(simplebus, ofwbus, simplebus_driver, simplebus_devclass, 0, 0, BUS_PASS_BUS); EARLY_DRIVER_MODULE(simplebus, simplebus, simplebus_driver, simplebus_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); static int simplebus_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); /* * XXX We should attach only to a pure 'compatible = "simple-bus"', * without any other compatible string. * For now, filter only known cases: * "syscon", "simple-bus"; is handled by fdt/syscon driver * "simple-mfd", "simple-bus"; is handled by fdt/simple-mfd driver */ if (ofw_bus_is_compatible(dev, "syscon") || ofw_bus_is_compatible(dev, "simple-mfd")) return (ENXIO); /* * FDT data puts a "simple-bus" compatible string on many things that * have children but aren't really buses in our world. Without a * ranges property we will fail to attach, so just fail to probe too. 
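 *
 * For reference, a node this driver does accept looks roughly like the
 * following (illustrative FDT source, not from any real board):
 *
 *	soc {
 *		compatible = "simple-bus";
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *	};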
*/ if (!(ofw_bus_is_compatible(dev, "simple-bus") && ofw_bus_has_prop(dev, "ranges")) && (ofw_bus_get_type(dev) == NULL || strcmp(ofw_bus_get_type(dev), "soc") != 0)) return (ENXIO); device_set_desc(dev, "Flattened device tree simple bus"); return (BUS_PROBE_GENERIC); } int simplebus_attach_impl(device_t dev) { struct simplebus_softc *sc; phandle_t node; sc = device_get_softc(dev); simplebus_init(dev, 0); if ((sc->flags & SB_FLAG_NO_RANGES) == 0 && simplebus_fill_ranges(sc->node, sc) < 0) { device_printf(dev, "could not get ranges\n"); return (ENXIO); } /* * In principle, simplebus could have an interrupt map, but ignore that * for now */ for (node = OF_child(sc->node); node > 0; node = OF_peer(node)) simplebus_add_device(dev, node, 0, NULL, -1, NULL); return (0); } int simplebus_attach(device_t dev) { int rv; rv = simplebus_attach_impl(dev); if (rv != 0) return (rv); return (bus_generic_attach(dev)); } int simplebus_detach(device_t dev) { struct simplebus_softc *sc; sc = device_get_softc(dev); if (sc->ranges != NULL) free(sc->ranges, M_DEVBUF); return (bus_generic_detach(dev)); } void simplebus_init(device_t dev, phandle_t node) { struct simplebus_softc *sc; sc = device_get_softc(dev); if (node == 0) node = ofw_bus_get_node(dev); sc->dev = dev; sc->node = node; /* * Some important numbers */ sc->acells = 2; OF_getencprop(node, "#address-cells", &sc->acells, sizeof(sc->acells)); sc->scells = 1; OF_getencprop(node, "#size-cells", &sc->scells, sizeof(sc->scells)); } int simplebus_fill_ranges(phandle_t node, struct simplebus_softc *sc) { int host_address_cells; cell_t *base_ranges; ssize_t nbase_ranges; int err; int i, j, k; err = OF_searchencprop(OF_parent(node), "#address-cells", &host_address_cells, sizeof(host_address_cells)); if (err <= 0) return (-1); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges < 0) return (-1); sc->nranges = nbase_ranges / sizeof(cell_t) / (sc->acells + host_address_cells + sc->scells); if (sc->nranges == 0) return (0); sc->ranges = malloc(sc->nranges * sizeof(sc->ranges[0]), M_DEVBUF, M_WAITOK); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); OF_getencprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < sc->nranges; i++) { sc->ranges[i].bus = 0; for (k = 0; k < sc->acells; k++) { sc->ranges[i].bus <<= 32; sc->ranges[i].bus |= base_ranges[j++]; } sc->ranges[i].host = 0; for (k = 0; k < host_address_cells; k++) { sc->ranges[i].host <<= 32; sc->ranges[i].host |= base_ranges[j++]; } sc->ranges[i].size = 0; for (k = 0; k < sc->scells; k++) { sc->ranges[i].size <<= 32; sc->ranges[i].size |= base_ranges[j++]; } } free(base_ranges, M_DEVBUF); return (sc->nranges); } struct simplebus_devinfo * simplebus_setup_dinfo(device_t dev, phandle_t node, struct simplebus_devinfo *di) { struct simplebus_softc *sc; struct simplebus_devinfo *ndi; sc = device_get_softc(dev); if (di == NULL) ndi = malloc(sizeof(*ndi), M_DEVBUF, M_WAITOK | M_ZERO); else ndi = di; if (ofw_bus_gen_setup_devinfo(&ndi->obdinfo, node) != 0) { if (di == NULL) free(ndi, M_DEVBUF); return (NULL); } resource_list_init(&ndi->rl); ofw_bus_reg_to_rl(dev, node, sc->acells, sc->scells, &ndi->rl); ofw_bus_intr_to_rl(dev, node, &ndi->rl, NULL); return (ndi); } device_t simplebus_add_device(device_t dev, phandle_t node, u_int order, const char *name, int unit, struct simplebus_devinfo *di) { struct simplebus_devinfo *ndi; device_t cdev; if ((ndi = simplebus_setup_dinfo(dev, node, di)) == NULL) return (NULL); cdev = device_add_child_ordered(dev, order, name, unit); if (cdev == 
NULL) { device_printf(dev, "<%s>: device_add_child failed\n", ndi->obdinfo.obd_name); resource_list_free(&ndi->rl); ofw_bus_gen_destroy_devinfo(&ndi->obdinfo); if (di == NULL) free(ndi, M_DEVBUF); return (NULL); } device_set_ivars(cdev, ndi); return(cdev); } static device_t simplebus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t cdev; struct simplebus_devinfo *ndi; cdev = device_add_child_ordered(dev, order, name, unit); if (cdev == NULL) return (NULL); ndi = malloc(sizeof(*ndi), M_DEVBUF, M_WAITOK | M_ZERO); ndi->obdinfo.obd_node = -1; resource_list_init(&ndi->rl); device_set_ivars(cdev, ndi); return (cdev); } static const struct ofw_bus_devinfo * simplebus_get_devinfo(device_t bus __unused, device_t child) { struct simplebus_devinfo *ndi; ndi = device_get_ivars(child); if (ndi == NULL) return (NULL); return (&ndi->obdinfo); } static struct resource_list * simplebus_get_resource_list(device_t bus __unused, device_t child) { struct simplebus_devinfo *ndi; ndi = device_get_ivars(child); if (ndi == NULL) return (NULL); return (&ndi->rl); } static struct resource * simplebus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct simplebus_softc *sc; struct simplebus_devinfo *di; struct resource_list_entry *rle; int j; sc = device_get_softc(bus); /* * Request for the default allocation with a given rid: use resource * list stored in the local device info. */ if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; rle = resource_list_find(&di->rl, type, *rid); if (rle == NULL) { if (bootverbose) device_printf(bus, "no default resources for " "rid = %d, type = %d\n", *rid, type); return (NULL); } start = rle->start; end = rle->end; count = rle->count; } if (type == SYS_RES_MEMORY) { /* Remap through ranges property */ for (j = 0; j < sc->nranges; j++) { if (start >= sc->ranges[j].bus && end < sc->ranges[j].bus + sc->ranges[j].size) { start -= sc->ranges[j].bus; start += sc->ranges[j].host; end -= sc->ranges[j].bus; end += sc->ranges[j].host; break; } } if (j == sc->nranges && sc->nranges != 0) { if (bootverbose) device_printf(bus, "Could not map resource " "%#jx-%#jx\n", start, end); return (NULL); } } return (bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int simplebus_print_res(struct simplebus_devinfo *di) { int rv; if (di == NULL) return (0); rv = 0; rv += resource_list_print_type(&di->rl, "mem", SYS_RES_MEMORY, "%#jx"); rv += resource_list_print_type(&di->rl, "irq", SYS_RES_IRQ, "%jd"); return (rv); } static void simplebus_probe_nomatch(device_t bus, device_t child) { const char *name, *type, *compat; if (!bootverbose) return; compat = ofw_bus_get_compat(child); if (compat == NULL) return; name = ofw_bus_get_name(child); type = ofw_bus_get_type(child); device_printf(bus, "<%s>", name != NULL ? 
name : "unknown"); simplebus_print_res(device_get_ivars(child)); if (!ofw_bus_status_okay(child)) printf(" disabled"); if (type) printf(" type %s", type); printf(" compat %s (no driver attached)\n", compat); } static int simplebus_print_child(device_t bus, device_t child) { int rv; rv = bus_print_child_header(bus, child); rv += simplebus_print_res(device_get_ivars(child)); if (!ofw_bus_status_okay(child)) rv += printf(" disabled"); rv += bus_print_child_footer(bus, child); return (rv); } diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c index e1f74d81955e..4ed8e14aedb9 100644 --- a/sys/dev/gpio/gpiobus.c +++ b/sys/dev/gpio/gpiobus.c @@ -1,1142 +1,1120 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #ifdef INTRNG #include #endif #include #include #include +#include #include #include "gpiobus_if.h" #undef GPIOBUS_DEBUG #ifdef GPIOBUS_DEBUG #define dprintf printf #else #define dprintf(x, arg...) 
#endif -static void gpiobus_print_pins(struct gpiobus_ivar *, char *, size_t); +static void gpiobus_print_pins(struct gpiobus_ivar *, struct sbuf *); static int gpiobus_parse_pins(struct gpiobus_softc *, device_t, int); static int gpiobus_probe(device_t); static int gpiobus_attach(device_t); static int gpiobus_detach(device_t); static int gpiobus_suspend(device_t); static int gpiobus_resume(device_t); static void gpiobus_probe_nomatch(device_t, device_t); static int gpiobus_print_child(device_t, device_t); -static int gpiobus_child_location_str(device_t, device_t, char *, size_t); -static int gpiobus_child_pnpinfo_str(device_t, device_t, char *, size_t); +static int gpiobus_child_location(device_t, device_t, struct sbuf *); static device_t gpiobus_add_child(device_t, u_int, const char *, int); static void gpiobus_hinted_child(device_t, const char *, int); /* * GPIOBUS interface */ static int gpiobus_acquire_bus(device_t, device_t, int); static void gpiobus_release_bus(device_t, device_t); static int gpiobus_pin_setflags(device_t, device_t, uint32_t, uint32_t); static int gpiobus_pin_getflags(device_t, device_t, uint32_t, uint32_t*); static int gpiobus_pin_getcaps(device_t, device_t, uint32_t, uint32_t*); static int gpiobus_pin_set(device_t, device_t, uint32_t, unsigned int); static int gpiobus_pin_get(device_t, device_t, uint32_t, unsigned int*); static int gpiobus_pin_toggle(device_t, device_t, uint32_t); /* * gpiobus_pin flags * The flags in struct gpiobus_pin are not related to the flags used by the * low-level controller driver in struct gpio_pin. Currently, only pins * acquired via FDT data have gpiobus_pin.flags set, sourced from the flags in * the FDT properties. In theory, these flags are defined per-platform. In * practice they are always the flags from the dt-bindings/gpio/gpio.h file. * The only one of those flags we currently support is for handling active-low * pins, so we just define that flag here instead of including a GPL'd header. */ #define GPIO_ACTIVE_LOW 1 /* * XXX -> Move me to better place - gpio_subr.c? * Also, this function must be changed when interrupt configuration * data will be moved into struct resource. */ #ifdef INTRNG struct resource * gpio_alloc_intr_resource(device_t consumer_dev, int *rid, u_int alloc_flags, gpio_pin_t pin, uint32_t intr_mode) { u_int irq; struct intr_map_data_gpio *gpio_data; struct resource *res; gpio_data = (struct intr_map_data_gpio *)intr_alloc_map_data( INTR_MAP_DATA_GPIO, sizeof(*gpio_data), M_WAITOK | M_ZERO); gpio_data->gpio_pin_num = pin->pin; gpio_data->gpio_pin_flags = pin->flags; gpio_data->gpio_intr_mode = intr_mode; irq = intr_map_irq(pin->dev, 0, (struct intr_map_data *)gpio_data); res = bus_alloc_resource(consumer_dev, SYS_RES_IRQ, rid, irq, irq, 1, alloc_flags); if (res == NULL) { intr_free_intr_map_data((struct intr_map_data *)gpio_data); return (NULL); } rman_set_virtual(res, gpio_data); return (res); } #else struct resource * gpio_alloc_intr_resource(device_t consumer_dev, int *rid, u_int alloc_flags, gpio_pin_t pin, uint32_t intr_mode) { return (NULL); } #endif int gpio_check_flags(uint32_t caps, uint32_t flags) { /* Filter unwanted flags. */ flags &= caps; /* Cannot mix input/output together. */ if (flags & GPIO_PIN_INPUT && flags & GPIO_PIN_OUTPUT) return (EINVAL); /* Cannot mix pull-up/pull-down together. 
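 * e.g., assuming caps advertises both pulls,
 * gpio_check_flags(caps, GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN) fails
 * with EINVAL, just like the input/output mix above.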
*/ if (flags & GPIO_PIN_PULLUP && flags & GPIO_PIN_PULLDOWN) return (EINVAL); /* Cannot mix output and interrupt flags together */ if (flags & GPIO_PIN_OUTPUT && flags & GPIO_INTR_MASK) return (EINVAL); /* Only one interrupt flag can be defined at once */ if ((flags & GPIO_INTR_MASK) & ((flags & GPIO_INTR_MASK) - 1)) return (EINVAL); /* The interrupt attached flag cannot be set */ if (flags & GPIO_INTR_ATTACHED) return (EINVAL); return (0); } int gpio_pin_get_by_bus_pinnum(device_t busdev, uint32_t pinnum, gpio_pin_t *ppin) { gpio_pin_t pin; int err; err = gpiobus_acquire_pin(busdev, pinnum); if (err != 0) return (EBUSY); pin = malloc(sizeof(*pin), M_DEVBUF, M_WAITOK | M_ZERO); pin->dev = device_get_parent(busdev); pin->pin = pinnum; pin->flags = 0; *ppin = pin; return (0); } int gpio_pin_get_by_child_index(device_t childdev, uint32_t idx, gpio_pin_t *ppin) { struct gpiobus_ivar *devi; devi = GPIOBUS_IVAR(childdev); if (idx >= devi->npins) return (EINVAL); return (gpio_pin_get_by_bus_pinnum(device_get_parent(childdev), devi->pins[idx], ppin)); } int gpio_pin_getcaps(gpio_pin_t pin, uint32_t *caps) { KASSERT(pin != NULL, ("GPIO pin is NULL.")); KASSERT(pin->dev != NULL, ("GPIO pin device is NULL.")); return (GPIO_PIN_GETCAPS(pin->dev, pin->pin, caps)); } int gpio_pin_is_active(gpio_pin_t pin, bool *active) { int rv; uint32_t tmp; KASSERT(pin != NULL, ("GPIO pin is NULL.")); KASSERT(pin->dev != NULL, ("GPIO pin device is NULL.")); rv = GPIO_PIN_GET(pin->dev, pin->pin, &tmp); if (rv != 0) { return (rv); } if (pin->flags & GPIO_ACTIVE_LOW) *active = tmp == 0; else *active = tmp != 0; return (0); } void gpio_pin_release(gpio_pin_t gpio) { device_t busdev; if (gpio == NULL) return; KASSERT(gpio->dev != NULL, ("GPIO pin device is NULL.")); busdev = GPIO_GET_BUS(gpio->dev); if (busdev != NULL) gpiobus_release_pin(busdev, gpio->pin); free(gpio, M_DEVBUF); } int gpio_pin_set_active(gpio_pin_t pin, bool active) { int rv; uint32_t tmp; if (pin->flags & GPIO_ACTIVE_LOW) tmp = active ? 0 : 1; else tmp = active ? 
1 : 0; KASSERT(pin != NULL, ("GPIO pin is NULL.")); KASSERT(pin->dev != NULL, ("GPIO pin device is NULL.")); rv = GPIO_PIN_SET(pin->dev, pin->pin, tmp); return (rv); } int gpio_pin_setflags(gpio_pin_t pin, uint32_t flags) { int rv; KASSERT(pin != NULL, ("GPIO pin is NULL.")); KASSERT(pin->dev != NULL, ("GPIO pin device is NULL.")); rv = GPIO_PIN_SETFLAGS(pin->dev, pin->pin, flags); return (rv); } static void -gpiobus_print_pins(struct gpiobus_ivar *devi, char *buf, size_t buflen) +gpiobus_print_pins(struct gpiobus_ivar *devi, struct sbuf *sb) { - char tmp[128]; int i, range_start, range_stop, need_coma; if (devi->npins == 0) return; need_coma = 0; range_start = range_stop = devi->pins[0]; for (i = 1; i < devi->npins; i++) { if (devi->pins[i] != (range_stop + 1)) { if (need_coma) - strlcat(buf, ",", buflen); - memset(tmp, 0, sizeof(tmp)); + sbuf_cat(sb, ","); if (range_start != range_stop) - snprintf(tmp, sizeof(tmp) - 1, "%d-%d", - range_start, range_stop); + sbuf_printf(sb, "%d-%d", range_start, range_stop); else - snprintf(tmp, sizeof(tmp) - 1, "%d", - range_start); - strlcat(buf, tmp, buflen); - + sbuf_printf(sb, "%d", range_start); range_start = range_stop = devi->pins[i]; need_coma = 1; } else range_stop++; } if (need_coma) - strlcat(buf, ",", buflen); - memset(tmp, 0, sizeof(tmp)); + sbuf_cat(sb, ","); if (range_start != range_stop) - snprintf(tmp, sizeof(tmp) - 1, "%d-%d", - range_start, range_stop); + sbuf_printf(sb, "%d-%d", range_start, range_stop); else - snprintf(tmp, sizeof(tmp) - 1, "%d", - range_start); - strlcat(buf, tmp, buflen); + sbuf_printf(sb, "%d", range_start); } device_t gpiobus_attach_bus(device_t dev) { device_t busdev; busdev = device_add_child(dev, "gpiobus", -1); if (busdev == NULL) return (NULL); if (device_add_child(dev, "gpioc", -1) == NULL) { device_delete_child(dev, busdev); return (NULL); } #ifdef FDT ofw_gpiobus_register_provider(dev); #endif bus_generic_attach(dev); return (busdev); } int gpiobus_detach_bus(device_t dev) { int err; #ifdef FDT ofw_gpiobus_unregister_provider(dev); #endif err = bus_generic_detach(dev); if (err != 0) return (err); return (device_delete_children(dev)); } int gpiobus_init_softc(device_t dev) { struct gpiobus_softc *sc; sc = GPIOBUS_SOFTC(dev); sc->sc_busdev = dev; sc->sc_dev = device_get_parent(dev); sc->sc_intr_rman.rm_type = RMAN_ARRAY; sc->sc_intr_rman.rm_descr = "GPIO Interrupts"; if (rman_init(&sc->sc_intr_rman) != 0 || rman_manage_region(&sc->sc_intr_rman, 0, ~0) != 0) panic("%s: failed to set up rman.", __func__); if (GPIO_PIN_MAX(sc->sc_dev, &sc->sc_npins) != 0) return (ENXIO); KASSERT(sc->sc_npins >= 0, ("GPIO device with no pins")); /* Pins = GPIO_PIN_MAX() + 1 */ sc->sc_npins++; sc->sc_pins = malloc(sizeof(*sc->sc_pins) * sc->sc_npins, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->sc_pins == NULL) return (ENOMEM); /* Initialize the bus lock. */ GPIOBUS_LOCK_INIT(sc); return (0); } int gpiobus_alloc_ivars(struct gpiobus_ivar *devi) { /* Allocate pins and flags memory. */ devi->pins = malloc(sizeof(uint32_t) * devi->npins, M_DEVBUF, M_NOWAIT | M_ZERO); if (devi->pins == NULL) return (ENOMEM); return (0); } void gpiobus_free_ivars(struct gpiobus_ivar *devi) { if (devi->pins) { free(devi->pins, M_DEVBUF); devi->pins = NULL; } devi->npins = 0; } int gpiobus_acquire_pin(device_t bus, uint32_t pin) { struct gpiobus_softc *sc; sc = device_get_softc(bus); /* Consistency check. 
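 * The pin number must fall inside [0, sc_npins) and the pin must not
 * already be mapped; e.g. a second gpiobus_acquire_pin(bus, 5) while
 * pin 5 is still held only triggers the warning below and fails.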
*/ if (pin >= sc->sc_npins) { device_printf(bus, "invalid pin %d, max: %d\n", pin, sc->sc_npins - 1); return (-1); } /* Mark pin as mapped and give warning if it's already mapped. */ if (sc->sc_pins[pin].mapped) { device_printf(bus, "warning: pin %d is already mapped\n", pin); return (-1); } sc->sc_pins[pin].mapped = 1; return (0); } /* Release mapped pin */ int gpiobus_release_pin(device_t bus, uint32_t pin) { struct gpiobus_softc *sc; sc = device_get_softc(bus); /* Consistency check. */ if (pin >= sc->sc_npins) { device_printf(bus, "invalid pin %d, max=%d\n", pin, sc->sc_npins - 1); return (-1); } if (!sc->sc_pins[pin].mapped) { device_printf(bus, "pin %d is not mapped\n", pin); return (-1); } sc->sc_pins[pin].mapped = 0; return (0); } static int gpiobus_acquire_child_pins(device_t dev, device_t child) { struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); int i; for (i = 0; i < devi->npins; i++) { /* Reserve the GPIO pin. */ if (gpiobus_acquire_pin(dev, devi->pins[i]) != 0) { device_printf(child, "cannot acquire pin %d\n", devi->pins[i]); while (--i >= 0) { (void)gpiobus_release_pin(dev, devi->pins[i]); } gpiobus_free_ivars(devi); return (EBUSY); } } for (i = 0; i < devi->npins; i++) { /* Use the child name as pin name. */ GPIOBUS_PIN_SETNAME(dev, devi->pins[i], device_get_nameunit(child)); } return (0); } static int gpiobus_parse_pins(struct gpiobus_softc *sc, device_t child, int mask) { struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); int i, npins; npins = 0; for (i = 0; i < 32; i++) { if (mask & (1 << i)) npins++; } if (npins == 0) { device_printf(child, "empty pin mask\n"); return (EINVAL); } devi->npins = npins; if (gpiobus_alloc_ivars(devi) != 0) { device_printf(child, "cannot allocate device ivars\n"); return (EINVAL); } npins = 0; for (i = 0; i < 32; i++) { if ((mask & (1 << i)) == 0) continue; devi->pins[npins++] = i; } return (0); } static int gpiobus_parse_pin_list(struct gpiobus_softc *sc, device_t child, const char *pins) { struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); const char *p; char *endp; unsigned long pin; int i, npins; npins = 0; p = pins; for (;;) { pin = strtoul(p, &endp, 0); if (endp == p) break; npins++; if (*endp == '\0') break; p = endp + 1; } if (*endp != '\0') { device_printf(child, "garbage in the pin list: %s\n", endp); return (EINVAL); } if (npins == 0) { device_printf(child, "empty pin list\n"); return (EINVAL); } devi->npins = npins; if (gpiobus_alloc_ivars(devi) != 0) { device_printf(child, "cannot allocate device ivars\n"); return (EINVAL); } i = 0; p = pins; for (;;) { pin = strtoul(p, &endp, 0); devi->pins[i] = pin; if (*endp == '\0') break; i++; p = endp + 1; } return (0); } static int gpiobus_probe(device_t dev) { device_set_desc(dev, "GPIO bus"); return (BUS_PROBE_GENERIC); } static int gpiobus_attach(device_t dev) { int err; err = gpiobus_init_softc(dev); if (err != 0) return (err); /* * Get parent's pins and mark them as unmapped */ bus_generic_probe(dev); bus_enumerate_hinted_children(dev); return (bus_generic_attach(dev)); } /* * Since this is not a self-enumerating bus, and since we always add * children in attach, we have to always delete children here. 
 */
static int
gpiobus_detach(device_t dev)
{
	struct gpiobus_softc *sc;
	struct gpiobus_ivar *devi;
	device_t *devlist;
	int i, err, ndevs;

	sc = GPIOBUS_SOFTC(dev);
	KASSERT(mtx_initialized(&sc->sc_mtx),
	    ("gpiobus mutex not initialized"));
	GPIOBUS_LOCK_DESTROY(sc);

	if ((err = bus_generic_detach(dev)) != 0)
		return (err);

	if ((err = device_get_children(dev, &devlist, &ndevs)) != 0)
		return (err);
	for (i = 0; i < ndevs; i++) {
		devi = GPIOBUS_IVAR(devlist[i]);
		gpiobus_free_ivars(devi);
		resource_list_free(&devi->rl);
		free(devi, M_DEVBUF);
		device_delete_child(dev, devlist[i]);
	}
	free(devlist, M_TEMP);
	rman_fini(&sc->sc_intr_rman);
	if (sc->sc_pins) {
		for (i = 0; i < sc->sc_npins; i++) {
			if (sc->sc_pins[i].name != NULL)
				free(sc->sc_pins[i].name, M_DEVBUF);
			sc->sc_pins[i].name = NULL;
		}
		free(sc->sc_pins, M_DEVBUF);
		sc->sc_pins = NULL;
	}

	return (0);
}

static int
gpiobus_suspend(device_t dev)
{

	return (bus_generic_suspend(dev));
}

static int
gpiobus_resume(device_t dev)
{

	return (bus_generic_resume(dev));
}

static void
gpiobus_probe_nomatch(device_t dev, device_t child)
{
	char pins[128];
+	struct sbuf sb;
	struct gpiobus_ivar *devi;

	devi = GPIOBUS_IVAR(child);
-	memset(pins, 0, sizeof(pins));
-	gpiobus_print_pins(devi, pins, sizeof(pins));
-	if (devi->npins > 1)
-		device_printf(dev, " at pins %s", pins);
-	else
-		device_printf(dev, " at pin %s", pins);
+	sbuf_new(&sb, pins, sizeof(pins), SBUF_FIXEDLEN);
+	gpiobus_print_pins(devi, &sb);
+	sbuf_finish(&sb);
+	device_printf(dev, " at pin%s %s",
+	    devi->npins > 1 ? "s" : "", sbuf_data(&sb));
	resource_list_print_type(&devi->rl, "irq", SYS_RES_IRQ, "%jd");
	printf("\n");
}

static int
gpiobus_print_child(device_t dev, device_t child)
{
	char pins[128];
+	struct sbuf sb;
	int retval = 0;
	struct gpiobus_ivar *devi;

	devi = GPIOBUS_IVAR(child);
-	memset(pins, 0, sizeof(pins));
	retval += bus_print_child_header(dev, child);
	if (devi->npins > 0) {
		if (devi->npins > 1)
			retval += printf(" at pins ");
		else
			retval += printf(" at pin ");
-		gpiobus_print_pins(devi, pins, sizeof(pins));
-		retval += printf("%s", pins);
+		sbuf_new(&sb, pins, sizeof(pins), SBUF_FIXEDLEN);
+		gpiobus_print_pins(devi, &sb);
+		sbuf_finish(&sb);
+		retval += printf("%s", sbuf_data(&sb));
	}
	resource_list_print_type(&devi->rl, "irq", SYS_RES_IRQ, "%jd");
	retval += bus_print_child_footer(dev, child);

	return (retval);
}

static int
-gpiobus_child_location_str(device_t bus, device_t child, char *buf,
-    size_t buflen)
+gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
{
	struct gpiobus_ivar *devi;

	devi = GPIOBUS_IVAR(child);
-	if (devi->npins > 1)
-		strlcpy(buf, "pins=", buflen);
-	else
-		strlcpy(buf, "pin=", buflen);
-	gpiobus_print_pins(devi, buf, buflen);
-
-	return (0);
-}
-
-static int
-gpiobus_child_pnpinfo_str(device_t bus, device_t child, char *buf,
-    size_t buflen)
-{
+	sbuf_printf(sb, "pins=");
+	gpiobus_print_pins(devi, sb);

-	*buf = '\0';
	return (0);
}

static device_t
gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
{
	device_t child;
	struct gpiobus_ivar *devi;

	child = device_add_child_ordered(dev, order, name, unit);
	if (child == NULL)
		return (child);
	devi = malloc(sizeof(struct gpiobus_ivar), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (devi == NULL) {
		device_delete_child(dev, child);
		return (NULL);
	}
	resource_list_init(&devi->rl);
	device_set_ivars(child, devi);

	return (child);
}

static int
gpiobus_rescan(device_t dev)
{

	/*
	 * Re-scan is supposed to remove and add children, but if someone has
	 * deleted the hints for a child we attached earlier, we have no easy
	 * way to
handle that. So this just attaches new children for whom new * hints or drivers have arrived since we last tried. */ bus_enumerate_hinted_children(dev); bus_generic_attach(dev); return (0); } static void gpiobus_hinted_child(device_t bus, const char *dname, int dunit) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(bus); struct gpiobus_ivar *devi; device_t child; const char *pins; int irq, pinmask; if (device_find_child(bus, dname, dunit) != NULL) { return; } child = BUS_ADD_CHILD(bus, 0, dname, dunit); devi = GPIOBUS_IVAR(child); if (resource_int_value(dname, dunit, "pins", &pinmask) == 0) { if (gpiobus_parse_pins(sc, child, pinmask)) { resource_list_free(&devi->rl); free(devi, M_DEVBUF); device_delete_child(bus, child); return; } } else if (resource_string_value(dname, dunit, "pin_list", &pins) == 0) { if (gpiobus_parse_pin_list(sc, child, pins)) { resource_list_free(&devi->rl); free(devi, M_DEVBUF); device_delete_child(bus, child); return; } } if (resource_int_value(dname, dunit, "irq", &irq) == 0) { if (bus_set_resource(child, SYS_RES_IRQ, 0, irq, 1) != 0) device_printf(bus, "warning: bus_set_resource() failed\n"); } } static int gpiobus_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct gpiobus_ivar *devi; struct resource_list_entry *rle; dprintf("%s: entry (%p, %p, %d, %d, %p, %ld)\n", __func__, dev, child, type, rid, (void *)(intptr_t)start, count); devi = GPIOBUS_IVAR(child); rle = resource_list_add(&devi->rl, type, rid, start, start + count - 1, count); if (rle == NULL) return (ENXIO); return (0); } static int gpiobus_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct gpiobus_ivar *devi; devi = GPIOBUS_IVAR(child); switch (which) { case GPIOBUS_IVAR_NPINS: *result = devi->npins; break; case GPIOBUS_IVAR_PINS: /* Children do not ever need to directly examine this. */ return (ENOTSUP); default: return (ENOENT); } return (0); } static int gpiobus_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct gpiobus_ivar *devi; const uint32_t *ptr; int i; devi = GPIOBUS_IVAR(child); switch (which) { case GPIOBUS_IVAR_NPINS: /* GPIO ivars are set once. 
*/ if (devi->npins != 0) { return (EBUSY); } devi->npins = value; if (gpiobus_alloc_ivars(devi) != 0) { device_printf(child, "cannot allocate device ivars\n"); devi->npins = 0; return (ENOMEM); } break; case GPIOBUS_IVAR_PINS: ptr = (const uint32_t *)value; for (i = 0; i < devi->npins; i++) devi->pins[i] = ptr[i]; if (gpiobus_acquire_child_pins(dev, child) != 0) return (EBUSY); break; default: return (ENOENT); } return (0); } static struct resource * gpiobus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct gpiobus_softc *sc; struct resource *rv; struct resource_list *rl; struct resource_list_entry *rle; int isdefault; if (type != SYS_RES_IRQ) return (NULL); isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); rle = NULL; if (isdefault) { rl = BUS_GET_RESOURCE_LIST(bus, child); if (rl == NULL) return (NULL); rle = resource_list_find(rl, type, *rid); if (rle == NULL) return (NULL); if (rle->res != NULL) panic("%s: resource entry is busy", __func__); start = rle->start; count = rle->count; end = rle->end; } sc = device_get_softc(bus); rv = rman_reserve_resource(&sc->sc_intr_rman, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if ((flags & RF_ACTIVE) != 0 && bus_activate_resource(child, type, *rid, rv) != 0) { rman_release_resource(rv); return (NULL); } return (rv); } static int gpiobus_release_resource(device_t bus __unused, device_t child, int type, int rid, struct resource *r) { int error; if (rman_get_flags(r) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, r); if (error) return (error); } return (rman_release_resource(r)); } static struct resource_list * gpiobus_get_resource_list(device_t bus __unused, device_t child) { struct gpiobus_ivar *ivar; ivar = GPIOBUS_IVAR(child); return (&ivar->rl); } static int gpiobus_acquire_bus(device_t busdev, device_t child, int how) { struct gpiobus_softc *sc; sc = device_get_softc(busdev); GPIOBUS_ASSERT_UNLOCKED(sc); GPIOBUS_LOCK(sc); if (sc->sc_owner != NULL) { if (sc->sc_owner == child) panic("%s: %s still owns the bus.", device_get_nameunit(busdev), device_get_nameunit(child)); if (how == GPIOBUS_DONTWAIT) { GPIOBUS_UNLOCK(sc); return (EWOULDBLOCK); } while (sc->sc_owner != NULL) mtx_sleep(sc, &sc->sc_mtx, 0, "gpiobuswait", 0); } sc->sc_owner = child; GPIOBUS_UNLOCK(sc); return (0); } static void gpiobus_release_bus(device_t busdev, device_t child) { struct gpiobus_softc *sc; sc = device_get_softc(busdev); GPIOBUS_ASSERT_UNLOCKED(sc); GPIOBUS_LOCK(sc); if (sc->sc_owner == NULL) panic("%s: %s releasing unowned bus.", device_get_nameunit(busdev), device_get_nameunit(child)); if (sc->sc_owner != child) panic("%s: %s trying to release bus owned by %s", device_get_nameunit(busdev), device_get_nameunit(child), device_get_nameunit(sc->sc_owner)); sc->sc_owner = NULL; wakeup(sc); GPIOBUS_UNLOCK(sc); } static int gpiobus_pin_setflags(device_t dev, device_t child, uint32_t pin, uint32_t flags) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); uint32_t caps; if (pin >= devi->npins) return (EINVAL); if (GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], &caps) != 0) return (EINVAL); if (gpio_check_flags(caps, flags) != 0) return (EINVAL); return (GPIO_PIN_SETFLAGS(sc->sc_dev, devi->pins[pin], flags)); } static int gpiobus_pin_getflags(device_t dev, device_t child, uint32_t pin, uint32_t *flags) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar 
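/*
 * gpiobus_acquire_bus()/gpiobus_release_bus() above implement the bus
 * ownership protocol: a child takes exclusive use of the bus around a
 * multi-step pin transaction.  A minimal consumer sketch (the "foo"
 * driver and its use of ivar pin index 0 are hypothetical, not part of
 * this change):
 *
 *	static int
 *	foo_pulse(device_t dev)
 *	{
 *		device_t busdev = device_get_parent(dev);
 *		int error;
 *
 *		error = GPIOBUS_ACQUIRE_BUS(busdev, dev, GPIOBUS_WAIT);
 *		if (error != 0)
 *			return (error);
 *		GPIOBUS_PIN_SETFLAGS(busdev, dev, 0, GPIO_PIN_OUTPUT);
 *		GPIOBUS_PIN_SET(busdev, dev, 0, GPIO_PIN_HIGH);
 *		GPIOBUS_PIN_SET(busdev, dev, 0, GPIO_PIN_LOW);
 *		GPIOBUS_RELEASE_BUS(busdev, dev);
 *		return (0);
 *	}
 */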
	    *devi = GPIOBUS_IVAR(child);

	if (pin >= devi->npins)
		return (EINVAL);

	return (GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags));
}

static int
gpiobus_pin_getcaps(device_t dev, device_t child, uint32_t pin,
    uint32_t *caps)
{
	struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
	struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);

	if (pin >= devi->npins)
		return (EINVAL);

	return (GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], caps));
}

static int
gpiobus_pin_set(device_t dev, device_t child, uint32_t pin,
    unsigned int value)
{
	struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
	struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);

	if (pin >= devi->npins)
		return (EINVAL);

	return (GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value));
}

static int
gpiobus_pin_get(device_t dev, device_t child, uint32_t pin,
    unsigned int *value)
{
	struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
	struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);

	if (pin >= devi->npins)
		return (EINVAL);

	return (GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value));
}

static int
gpiobus_pin_toggle(device_t dev, device_t child, uint32_t pin)
{
	struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
	struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);

	if (pin >= devi->npins)
		return (EINVAL);

	return (GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]));
}

static int
gpiobus_pin_getname(device_t dev, uint32_t pin, char *name)
{
	struct gpiobus_softc *sc;

	sc = GPIOBUS_SOFTC(dev);
	if (pin >= sc->sc_npins)
		return (EINVAL);
	/* Do we have a name for this pin? */
	if (sc->sc_pins[pin].name != NULL) {
		memcpy(name, sc->sc_pins[pin].name, GPIOMAXNAME);
		return (0);
	}
	/* Return the default pin name. */
	return (GPIO_PIN_GETNAME(device_get_parent(dev), pin, name));
}

static int
gpiobus_pin_setname(device_t dev, uint32_t pin, const char *name)
{
	struct gpiobus_softc *sc;

	sc = GPIOBUS_SOFTC(dev);
	if (pin >= sc->sc_npins)
		return (EINVAL);
	if (name == NULL)
		return (EINVAL);
	/* Save the pin name.
*/ if (sc->sc_pins[pin].name == NULL) sc->sc_pins[pin].name = malloc(GPIOMAXNAME, M_DEVBUF, M_WAITOK | M_ZERO); strlcpy(sc->sc_pins[pin].name, name, GPIOMAXNAME); return (0); } static device_method_t gpiobus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gpiobus_probe), DEVMETHOD(device_attach, gpiobus_attach), DEVMETHOD(device_detach, gpiobus_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, gpiobus_suspend), DEVMETHOD(device_resume, gpiobus_resume), /* Bus interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_config_intr, bus_generic_config_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_set_resource, gpiobus_set_resource), DEVMETHOD(bus_alloc_resource, gpiobus_alloc_resource), DEVMETHOD(bus_release_resource, gpiobus_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_get_resource_list, gpiobus_get_resource_list), DEVMETHOD(bus_add_child, gpiobus_add_child), DEVMETHOD(bus_rescan, gpiobus_rescan), DEVMETHOD(bus_probe_nomatch, gpiobus_probe_nomatch), DEVMETHOD(bus_print_child, gpiobus_print_child), - DEVMETHOD(bus_child_pnpinfo_str, gpiobus_child_pnpinfo_str), - DEVMETHOD(bus_child_location_str, gpiobus_child_location_str), + DEVMETHOD(bus_child_location, gpiobus_child_location), DEVMETHOD(bus_hinted_child, gpiobus_hinted_child), DEVMETHOD(bus_read_ivar, gpiobus_read_ivar), DEVMETHOD(bus_write_ivar, gpiobus_write_ivar), /* GPIO protocol */ DEVMETHOD(gpiobus_acquire_bus, gpiobus_acquire_bus), DEVMETHOD(gpiobus_release_bus, gpiobus_release_bus), DEVMETHOD(gpiobus_pin_getflags, gpiobus_pin_getflags), DEVMETHOD(gpiobus_pin_getcaps, gpiobus_pin_getcaps), DEVMETHOD(gpiobus_pin_setflags, gpiobus_pin_setflags), DEVMETHOD(gpiobus_pin_get, gpiobus_pin_get), DEVMETHOD(gpiobus_pin_set, gpiobus_pin_set), DEVMETHOD(gpiobus_pin_toggle, gpiobus_pin_toggle), DEVMETHOD(gpiobus_pin_getname, gpiobus_pin_getname), DEVMETHOD(gpiobus_pin_setname, gpiobus_pin_setname), DEVMETHOD_END }; driver_t gpiobus_driver = { "gpiobus", gpiobus_methods, sizeof(struct gpiobus_softc) }; devclass_t gpiobus_devclass; EARLY_DRIVER_MODULE(gpiobus, gpio, gpiobus_driver, gpiobus_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(gpiobus, 1); diff --git a/sys/dev/gpio/ofw_gpiobus.c b/sys/dev/gpio/ofw_gpiobus.c index 5d2353b54ba3..5c11f65baf7f 100644 --- a/sys/dev/gpio/ofw_gpiobus.c +++ b/sys/dev/gpio/ofw_gpiobus.c @@ -1,517 +1,517 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009, Nathan Whitehorn * Copyright (c) 2013, Luiz Otavio O Souza * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include "gpiobus_if.h" static struct ofw_gpiobus_devinfo *ofw_gpiobus_setup_devinfo(device_t, device_t, phandle_t); static void ofw_gpiobus_destroy_devinfo(device_t, struct ofw_gpiobus_devinfo *); static int ofw_gpiobus_parse_gpios_impl(device_t, phandle_t, char *, struct gpiobus_softc *, struct gpiobus_pin **); /* * Utility functions for easier handling of OFW GPIO pins. * * !!! BEWARE !!! * GPIOBUS uses children's IVARs, so we cannot use this interface for cross * tree consumers. * */ int gpio_pin_get_by_ofw_propidx(device_t consumer, phandle_t cnode, char *prop_name, int idx, gpio_pin_t *out_pin) { phandle_t xref; pcell_t *cells; device_t busdev; struct gpiobus_pin pin; int ncells, rv; KASSERT(consumer != NULL && cnode > 0, ("both consumer and cnode required")); rv = ofw_bus_parse_xref_list_alloc(cnode, prop_name, "#gpio-cells", idx, &xref, &ncells, &cells); if (rv != 0) return (rv); /* Translate provider to device. */ pin.dev = OF_device_from_xref(xref); if (pin.dev == NULL) { OF_prop_free(cells); return (ENODEV); } /* Test if GPIO bus already exist. */ busdev = GPIO_GET_BUS(pin.dev); if (busdev == NULL) { OF_prop_free(cells); return (ENODEV); } /* Map GPIO pin. */ rv = gpio_map_gpios(pin.dev, cnode, OF_node_from_xref(xref), ncells, cells, &pin.pin, &pin.flags); OF_prop_free(cells); if (rv != 0) return (ENXIO); /* Reserve GPIO pin. */ rv = gpiobus_acquire_pin(busdev, pin.pin); if (rv != 0) return (EBUSY); *out_pin = malloc(sizeof(struct gpiobus_pin), M_DEVBUF, M_WAITOK | M_ZERO); **out_pin = pin; return (0); } int gpio_pin_get_by_ofw_idx(device_t consumer, phandle_t node, int idx, gpio_pin_t *pin) { return (gpio_pin_get_by_ofw_propidx(consumer, node, "gpios", idx, pin)); } int gpio_pin_get_by_ofw_property(device_t consumer, phandle_t node, char *name, gpio_pin_t *pin) { return (gpio_pin_get_by_ofw_propidx(consumer, node, name, 0, pin)); } int gpio_pin_get_by_ofw_name(device_t consumer, phandle_t node, char *name, gpio_pin_t *pin) { int rv, idx; KASSERT(consumer != NULL && node > 0, ("both consumer and node required")); rv = ofw_bus_find_string_index(node, "gpio-names", name, &idx); if (rv != 0) return (rv); return (gpio_pin_get_by_ofw_idx(consumer, node, idx, pin)); } /* * OFW_GPIOBUS driver. */ device_t ofw_gpiobus_add_fdt_child(device_t bus, const char *drvname, phandle_t child) { device_t childdev; int i; struct gpiobus_ivar *devi; struct ofw_gpiobus_devinfo *dinfo; /* * Check to see if we already have a child for @p child, and if so * return it. */ childdev = ofw_bus_find_child_device_by_phandle(bus, child); if (childdev != NULL) return (childdev); /* * Set up the GPIO child and OFW bus layer devinfo and add it to bus. 
*/ childdev = device_add_child(bus, drvname, -1); if (childdev == NULL) return (NULL); dinfo = ofw_gpiobus_setup_devinfo(bus, childdev, child); if (dinfo == NULL) { device_delete_child(bus, childdev); return (NULL); } if (device_probe_and_attach(childdev) != 0) { ofw_gpiobus_destroy_devinfo(bus, dinfo); device_delete_child(bus, childdev); return (NULL); } /* Use the child name as pin name. */ devi = &dinfo->opd_dinfo; for (i = 0; i < devi->npins; i++) GPIOBUS_PIN_SETNAME(bus, devi->pins[i], device_get_nameunit(childdev)); return (childdev); } int ofw_gpiobus_parse_gpios(device_t consumer, char *pname, struct gpiobus_pin **pins) { return (ofw_gpiobus_parse_gpios_impl(consumer, ofw_bus_get_node(consumer), pname, NULL, pins)); } void ofw_gpiobus_register_provider(device_t provider) { phandle_t node; node = ofw_bus_get_node(provider); if (node != -1) OF_device_register_xref(OF_xref_from_node(node), provider); } void ofw_gpiobus_unregister_provider(device_t provider) { phandle_t node; node = ofw_bus_get_node(provider); if (node != -1) OF_device_register_xref(OF_xref_from_node(node), NULL); } static struct ofw_gpiobus_devinfo * ofw_gpiobus_setup_devinfo(device_t bus, device_t child, phandle_t node) { int i, npins; struct gpiobus_ivar *devi; struct gpiobus_pin *pins; struct gpiobus_softc *sc; struct ofw_gpiobus_devinfo *dinfo; sc = device_get_softc(bus); dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (dinfo == NULL) return (NULL); if (ofw_bus_gen_setup_devinfo(&dinfo->opd_obdinfo, node) != 0) { free(dinfo, M_DEVBUF); return (NULL); } /* Parse the gpios property for the child. */ npins = ofw_gpiobus_parse_gpios_impl(child, node, "gpios", sc, &pins); if (npins <= 0) { ofw_bus_gen_destroy_devinfo(&dinfo->opd_obdinfo); free(dinfo, M_DEVBUF); return (NULL); } /* Initialize the irq resource list. */ resource_list_init(&dinfo->opd_dinfo.rl); /* Allocate the child ivars and copy the parsed pin data. */ devi = &dinfo->opd_dinfo; devi->npins = (uint32_t)npins; if (gpiobus_alloc_ivars(devi) != 0) { free(pins, M_DEVBUF); ofw_gpiobus_destroy_devinfo(bus, dinfo); return (NULL); } for (i = 0; i < devi->npins; i++) devi->pins[i] = pins[i].pin; free(pins, M_DEVBUF); /* Parse the interrupt resources. */ if (ofw_bus_intr_to_rl(bus, node, &dinfo->opd_dinfo.rl, NULL) != 0) { ofw_gpiobus_destroy_devinfo(bus, dinfo); return (NULL); } device_set_ivars(child, dinfo); return (dinfo); } static void ofw_gpiobus_destroy_devinfo(device_t bus, struct ofw_gpiobus_devinfo *dinfo) { int i; struct gpiobus_ivar *devi; struct gpiobus_softc *sc; sc = device_get_softc(bus); devi = &dinfo->opd_dinfo; for (i = 0; i < devi->npins; i++) { if (devi->pins[i] > sc->sc_npins) continue; sc->sc_pins[devi->pins[i]].mapped = 0; } gpiobus_free_ivars(devi); resource_list_free(&dinfo->opd_dinfo.rl); ofw_bus_gen_destroy_devinfo(&dinfo->opd_obdinfo); free(dinfo, M_DEVBUF); } static int ofw_gpiobus_parse_gpios_impl(device_t consumer, phandle_t cnode, char *pname, struct gpiobus_softc *bussc, struct gpiobus_pin **pins) { int gpiocells, i, j, ncells, npins; pcell_t *gpios; phandle_t gpio; ncells = OF_getencprop_alloc_multi(cnode, pname, sizeof(*gpios), (void **)&gpios); if (ncells == -1) { device_printf(consumer, "Warning: No %s specified in fdt data; " "device may not function.\n", pname); return (-1); } /* * The gpio-specifier is controller independent, the first pcell has * the reference to the GPIO controller phandler. * Count the number of encoded gpio-specifiers on the first pass. 
*/ i = 0; npins = 0; while (i < ncells) { /* Allow NULL specifiers. */ if (gpios[i] == 0) { npins++; i++; continue; } gpio = OF_node_from_xref(gpios[i]); /* If we have bussc, ignore devices from other gpios. */ if (bussc != NULL) if (ofw_bus_get_node(bussc->sc_dev) != gpio) return (0); /* * Check for gpio-controller property and read the #gpio-cells * for this GPIO controller. */ if (!OF_hasprop(gpio, "gpio-controller") || OF_getencprop(gpio, "#gpio-cells", &gpiocells, sizeof(gpiocells)) < 0) { device_printf(consumer, "gpio reference is not a gpio-controller.\n"); OF_prop_free(gpios); return (-1); } if (ncells - i < gpiocells + 1) { device_printf(consumer, "%s cells doesn't match #gpio-cells.\n", pname); return (-1); } npins++; i += gpiocells + 1; } if (npins == 0 || pins == NULL) { if (npins == 0) device_printf(consumer, "no pin specified in %s.\n", pname); OF_prop_free(gpios); return (npins); } *pins = malloc(sizeof(struct gpiobus_pin) * npins, M_DEVBUF, M_NOWAIT | M_ZERO); if (*pins == NULL) { OF_prop_free(gpios); return (-1); } /* Decode the gpio specifier on the second pass. */ i = 0; j = 0; while (i < ncells) { /* Allow NULL specifiers. */ if (gpios[i] == 0) { j++; i++; continue; } gpio = OF_node_from_xref(gpios[i]); /* Read gpio-cells property for this GPIO controller. */ if (OF_getencprop(gpio, "#gpio-cells", &gpiocells, sizeof(gpiocells)) < 0) { device_printf(consumer, "gpio does not have the #gpio-cells property.\n"); goto fail; } /* Return the device reference for the GPIO controller. */ (*pins)[j].dev = OF_device_from_xref(gpios[i]); if ((*pins)[j].dev == NULL) { device_printf(consumer, "no device registered for the gpio controller.\n"); goto fail; } /* * If the gpiobus softc is NULL we use the GPIO_GET_BUS() to * retrieve it. The GPIO_GET_BUS() method is only valid after * the child is probed and attached. */ if (bussc == NULL) { if (GPIO_GET_BUS((*pins)[j].dev) == NULL) { device_printf(consumer, "no gpiobus reference for %s.\n", device_get_nameunit((*pins)[j].dev)); goto fail; } bussc = device_get_softc(GPIO_GET_BUS((*pins)[j].dev)); } /* Get the GPIO pin number and flags. */ if (gpio_map_gpios((*pins)[j].dev, cnode, gpio, gpiocells, &gpios[i + 1], &(*pins)[j].pin, &(*pins)[j].flags) != 0) { device_printf(consumer, "cannot map the gpios specifier.\n"); goto fail; } /* Reserve the GPIO pin. */ if (gpiobus_acquire_pin(bussc->sc_busdev, (*pins)[j].pin) != 0) goto fail; j++; i += gpiocells + 1; } OF_prop_free(gpios); return (npins); fail: OF_prop_free(gpios); free(*pins, M_DEVBUF); return (-1); } static int ofw_gpiobus_probe(device_t dev) { if (ofw_bus_get_node(dev) == -1) return (ENXIO); device_set_desc(dev, "OFW GPIO bus"); return (BUS_PROBE_DEFAULT); } static int ofw_gpiobus_attach(device_t dev) { int err; phandle_t child; err = gpiobus_init_softc(dev); if (err != 0) return (err); bus_generic_probe(dev); bus_enumerate_hinted_children(dev); /* * Attach the children represented in the device tree. 
*/ for (child = OF_child(ofw_bus_get_node(dev)); child != 0; child = OF_peer(child)) { if (OF_hasprop(child, "gpio-hog")) continue; if (!OF_hasprop(child, "gpios")) continue; if (ofw_gpiobus_add_fdt_child(dev, NULL, child) == NULL) continue; } return (bus_generic_attach(dev)); } static device_t ofw_gpiobus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct ofw_gpiobus_devinfo *devi; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); devi = malloc(sizeof(struct ofw_gpiobus_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (0); } /* * NULL all the OFW-related parts of the ivars for non-OFW * children. */ devi->opd_obdinfo.obd_node = -1; devi->opd_obdinfo.obd_name = NULL; devi->opd_obdinfo.obd_compat = NULL; devi->opd_obdinfo.obd_type = NULL; devi->opd_obdinfo.obd_model = NULL; device_set_ivars(child, devi); return (child); } static const struct ofw_bus_devinfo * ofw_gpiobus_get_devinfo(device_t bus, device_t dev) { struct ofw_gpiobus_devinfo *dinfo; dinfo = device_get_ivars(dev); return (&dinfo->opd_obdinfo); } static device_method_t ofw_gpiobus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofw_gpiobus_probe), DEVMETHOD(device_attach, ofw_gpiobus_attach), /* Bus interface */ - DEVMETHOD(bus_child_pnpinfo_str, ofw_bus_gen_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_add_child, ofw_gpiobus_add_child), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, ofw_gpiobus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; devclass_t ofwgpiobus_devclass; DEFINE_CLASS_1(gpiobus, ofw_gpiobus_driver, ofw_gpiobus_methods, sizeof(struct gpiobus_softc), gpiobus_driver); EARLY_DRIVER_MODULE(ofw_gpiobus, gpio, ofw_gpiobus_driver, ofwgpiobus_devclass, 0, 0, BUS_PASS_BUS); MODULE_VERSION(ofw_gpiobus, 1); MODULE_DEPEND(ofw_gpiobus, gpiobus, 1, 1, 1); diff --git a/sys/dev/hid/hidbus.c b/sys/dev/hid/hidbus.c index 3064f9999f2f..58d19a2a4d74 100644 --- a/sys/dev/hid/hidbus.c +++ b/sys/dev/hid/hidbus.c @@ -1,910 +1,909 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2019-2020 Vladimir Kondratyev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include -#include +#include #include +#include #define HID_DEBUG_VAR hid_debug #include #include #include #include "hid_if.h" #define INPUT_EPOCH global_epoch_preempt #define HID_RSIZE_MAX 1024 static hid_intr_t hidbus_intr; static device_probe_t hidbus_probe; static device_attach_t hidbus_attach; static device_detach_t hidbus_detach; struct hidbus_ivars { int32_t usage; uint8_t index; uint32_t flags; uintptr_t driver_info; /* for internal use */ struct mtx *mtx; /* child intr mtx */ hid_intr_t *intr_handler; /* executed under mtx*/ void *intr_ctx; unsigned int refcnt; /* protected by mtx */ struct epoch_context epoch_ctx; CK_STAILQ_ENTRY(hidbus_ivars) link; }; struct hidbus_softc { device_t dev; struct sx sx; struct mtx mtx; bool nowrite; struct hid_rdesc_info rdesc; bool overloaded; int nest; /* Child attach nesting lvl */ int nauto; /* Number of autochildren */ CK_STAILQ_HEAD(, hidbus_ivars) tlcs; }; static int hidbus_fill_rdesc_info(struct hid_rdesc_info *hri, const void *data, hid_size_t len) { int error = 0; hri->data = __DECONST(void *, data); hri->len = len; /* * If report descriptor is not available yet, set maximal * report sizes high enough to allow hidraw to work. */ hri->isize = len == 0 ? HID_RSIZE_MAX : hid_report_size_max(data, len, hid_input, &hri->iid); hri->osize = len == 0 ? HID_RSIZE_MAX : hid_report_size_max(data, len, hid_output, &hri->oid); hri->fsize = len == 0 ? 
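/*
 * The isize/osize/fsize values computed here are the per-kind transfer
 * buffer sizes that hidbus and hidraw rely on, capped at HID_RSIZE_MAX.
 * A child driver would size its own buffer from the cached info, e.g.
 * (a minimal sketch; error handling omitted):
 *
 *	struct hid_rdesc_info *hri = hidbus_get_rdesc_info(child);
 *	void *buf = malloc(hri->isize, M_DEVBUF, M_WAITOK | M_ZERO);
 */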
HID_RSIZE_MAX : hid_report_size_max(data, len, hid_feature, &hri->fid); if (hri->isize > HID_RSIZE_MAX) { DPRINTF("input size is too large, %u bytes (truncating)\n", hri->isize); hri->isize = HID_RSIZE_MAX; error = EOVERFLOW; } if (hri->osize > HID_RSIZE_MAX) { DPRINTF("output size is too large, %u bytes (truncating)\n", hri->osize); hri->osize = HID_RSIZE_MAX; error = EOVERFLOW; } if (hri->fsize > HID_RSIZE_MAX) { DPRINTF("feature size is too large, %u bytes (truncating)\n", hri->fsize); hri->fsize = HID_RSIZE_MAX; error = EOVERFLOW; } return (error); } int hidbus_locate(const void *desc, hid_size_t size, int32_t u, enum hid_kind k, uint8_t tlc_index, uint8_t index, struct hid_location *loc, uint32_t *flags, uint8_t *id, struct hid_absinfo *ai) { struct hid_data *d; struct hid_item h; int i; d = hid_start_parse(desc, size, 1 << k); HIDBUS_FOREACH_ITEM(d, &h, tlc_index) { for (i = 0; i < h.nusages; i++) { if (h.kind == k && h.usages[i] == u) { if (index--) break; if (loc != NULL) *loc = h.loc; if (flags != NULL) *flags = h.flags; if (id != NULL) *id = h.report_ID; if (ai != NULL && (h.flags&HIO_RELATIVE) == 0) *ai = (struct hid_absinfo) { .max = h.logical_maximum, .min = h.logical_minimum, .res = hid_item_resolution(&h), }; hid_end_parse(d); return (1); } } } if (loc != NULL) loc->size = 0; if (flags != NULL) *flags = 0; if (id != NULL) *id = 0; hid_end_parse(d); return (0); } static device_t hidbus_add_child(device_t dev, u_int order, const char *name, int unit) { struct hidbus_softc *sc = device_get_softc(dev); struct hidbus_ivars *tlc; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); tlc = malloc(sizeof(struct hidbus_ivars), M_DEVBUF, M_WAITOK | M_ZERO); tlc->mtx = &sc->mtx; device_set_ivars(child, tlc); sx_xlock(&sc->sx); CK_STAILQ_INSERT_TAIL(&sc->tlcs, tlc, link); sx_unlock(&sc->sx); return (child); } static int hidbus_enumerate_children(device_t dev, const void* data, hid_size_t len) { struct hidbus_softc *sc = device_get_softc(dev); struct hid_data *hd; struct hid_item hi; device_t child; uint8_t index = 0; if (data == NULL || len == 0) return (ENXIO); /* Add a child for each top level collection */ hd = hid_start_parse(data, len, 1 << hid_input); while (hid_get_item(hd, &hi)) { if (hi.kind != hid_collection || hi.collevel != 1) continue; child = BUS_ADD_CHILD(dev, 0, NULL, -1); if (child == NULL) { device_printf(dev, "Could not add HID device\n"); continue; } hidbus_set_index(child, index); hidbus_set_usage(child, hi.usage); hidbus_set_flags(child, HIDBUS_FLAG_AUTOCHILD); index++; DPRINTF("Add child TLC: 0x%04x:0x%04x\n", HID_GET_USAGE_PAGE(hi.usage), HID_GET_USAGE(hi.usage)); } hid_end_parse(hd); if (index == 0) return (ENXIO); sc->nauto = index; return (0); } static int hidbus_attach_children(device_t dev) { struct hidbus_softc *sc = device_get_softc(dev); int error; HID_INTR_SETUP(device_get_parent(dev), hidbus_intr, sc, &sc->rdesc); error = hidbus_enumerate_children(dev, sc->rdesc.data, sc->rdesc.len); if (error != 0) DPRINTF("failed to enumerate children: error %d\n", error); /* * hidbus_attach_children() can recurse through device_identify-> * hid_set_report_descr() call sequence. Do not perform children * attach twice in that case. 
*/ sc->nest++; bus_generic_probe(dev); sc->nest--; if (sc->nest != 0) return (0); if (hid_is_keyboard(sc->rdesc.data, sc->rdesc.len) != 0) error = bus_generic_attach(dev); else error = bus_delayed_attach_children(dev); if (error != 0) device_printf(dev, "failed to attach child: error %d\n", error); return (error); } static int hidbus_detach_children(device_t dev) { device_t *children, bus; bool is_bus; int i, error; error = 0; is_bus = device_get_devclass(dev) == hidbus_devclass; bus = is_bus ? dev : device_get_parent(dev); KASSERT(device_get_devclass(bus) == hidbus_devclass, ("Device is not hidbus or it's child")); if (is_bus) { /* If hidbus is passed, delete all children. */ bus_generic_detach(bus); device_delete_children(bus); } else { /* * If hidbus child is passed, delete all hidbus children * except caller. Deleting the caller may result in deadlock. */ error = device_get_children(bus, &children, &i); if (error != 0) return (error); while (i-- > 0) { if (children[i] == dev) continue; DPRINTF("Delete child. index=%d (%s)\n", hidbus_get_index(children[i]), device_get_nameunit(children[i])); error = device_delete_child(bus, children[i]); if (error) { DPRINTF("Failed deleting %s\n", device_get_nameunit(children[i])); break; } } free(children, M_TEMP); } HID_INTR_UNSETUP(device_get_parent(bus)); return (error); } static int hidbus_probe(device_t dev) { device_set_desc(dev, "HID bus"); /* Allow other subclasses to override this driver. */ return (BUS_PROBE_GENERIC); } static int hidbus_attach(device_t dev) { struct hidbus_softc *sc = device_get_softc(dev); struct hid_device_info *devinfo = device_get_ivars(dev); void *d_ptr = NULL; hid_size_t d_len; int error; sc->dev = dev; CK_STAILQ_INIT(&sc->tlcs); mtx_init(&sc->mtx, "hidbus ivar lock", NULL, MTX_DEF); sx_init(&sc->sx, "hidbus ivar list lock"); /* * Ignore error. It is possible for non-HID device e.g. XBox360 gamepad * to emulate HID through overloading of report descriptor. 
*/ d_len = devinfo->rdescsize; if (d_len != 0) { d_ptr = malloc(d_len, M_DEVBUF, M_ZERO | M_WAITOK); error = hid_get_rdesc(dev, d_ptr, d_len); if (error != 0) { free(d_ptr, M_DEVBUF); d_len = 0; d_ptr = NULL; } } hidbus_fill_rdesc_info(&sc->rdesc, d_ptr, d_len); sc->nowrite = hid_test_quirk(devinfo, HQ_NOWRITE); error = hidbus_attach_children(dev); if (error != 0) { hidbus_detach(dev); return (ENXIO); } return (0); } static int hidbus_detach(device_t dev) { struct hidbus_softc *sc = device_get_softc(dev); hidbus_detach_children(dev); sx_destroy(&sc->sx); mtx_destroy(&sc->mtx); free(sc->rdesc.data, M_DEVBUF); return (0); } static void hidbus_child_detached(device_t bus, device_t child) { struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *tlc = device_get_ivars(child); KASSERT(tlc->refcnt == 0, ("Child device is running")); tlc->mtx = &sc->mtx; tlc->intr_handler = NULL; tlc->flags &= ~HIDBUS_FLAG_CAN_POLL; } /* * Epoch callback indicating tlc is safe to destroy */ static void hidbus_ivar_dtor(epoch_context_t ctx) { struct hidbus_ivars *tlc; tlc = __containerof(ctx, struct hidbus_ivars, epoch_ctx); free(tlc, M_DEVBUF); } static void hidbus_child_deleted(device_t bus, device_t child) { struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *tlc = device_get_ivars(child); sx_xlock(&sc->sx); KASSERT(tlc->refcnt == 0, ("Child device is running")); CK_STAILQ_REMOVE(&sc->tlcs, tlc, hidbus_ivars, link); sx_unlock(&sc->sx); epoch_call(INPUT_EPOCH, hidbus_ivar_dtor, &tlc->epoch_ctx); } static int hidbus_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *tlc = device_get_ivars(child); switch (which) { case HIDBUS_IVAR_INDEX: *result = tlc->index; break; case HIDBUS_IVAR_USAGE: *result = tlc->usage; break; case HIDBUS_IVAR_FLAGS: *result = tlc->flags; break; case HIDBUS_IVAR_DRIVER_INFO: *result = tlc->driver_info; break; case HIDBUS_IVAR_LOCK: *result = (uintptr_t)(tlc->mtx == &sc->mtx ? NULL : tlc->mtx); break; default: return (EINVAL); } return (0); } static int hidbus_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *tlc = device_get_ivars(child); switch (which) { case HIDBUS_IVAR_INDEX: tlc->index = value; break; case HIDBUS_IVAR_USAGE: tlc->usage = value; break; case HIDBUS_IVAR_FLAGS: tlc->flags = value; if ((value & HIDBUS_FLAG_CAN_POLL) != 0) HID_INTR_SETUP( device_get_parent(bus), NULL, NULL, NULL); break; case HIDBUS_IVAR_DRIVER_INFO: tlc->driver_info = value; break; case HIDBUS_IVAR_LOCK: tlc->mtx = (struct mtx *)value == NULL ? 
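/*
 * HIDBUS_IVAR_LOCK lets a child substitute its own interrupt mutex;
 * writing NULL restores the bus-private lock.  A hypothetical child
 * attach path might look like this (assuming the hidbus_set_lock()
 * ivar accessor; the "foo" driver is illustrative, not part of this
 * change):
 *
 *	static int
 *	foo_attach(device_t dev)
 *	{
 *		struct foo_softc *sc = device_get_softc(dev);
 *
 *		mtx_init(&sc->lock, "foo intr", NULL, MTX_DEF);
 *		hidbus_set_lock(dev, &sc->lock);
 *		hidbus_set_intr(dev, foo_intr, sc);
 *		return (0);
 *	}
 */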
		    &sc->mtx : (struct mtx *)value;
		break;
	default:
		return (EINVAL);
	}

	return (0);
}

/* Location hint for devctl(8) */
static int
-hidbus_child_location_str(device_t bus, device_t child, char *buf,
-    size_t buflen)
+hidbus_child_location(device_t bus, device_t child, struct sbuf *sb)
{
	struct hidbus_ivars *tlc = device_get_ivars(child);

-	snprintf(buf, buflen, "index=%hhu", tlc->index);
+	sbuf_printf(sb, "index=%hhu", tlc->index);
	return (0);
}

/* PnP information for devctl(8) */
static int
-hidbus_child_pnpinfo_str(device_t bus, device_t child, char *buf,
-    size_t buflen)
+hidbus_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb)
{
	struct hidbus_ivars *tlc = device_get_ivars(child);
	struct hid_device_info *devinfo = device_get_ivars(bus);

-	snprintf(buf, buflen, "page=0x%04x usage=0x%04x bus=0x%02hx "
+	sbuf_printf(sb, "page=0x%04x usage=0x%04x bus=0x%02hx "
	    "vendor=0x%04hx product=0x%04hx version=0x%04hx%s%s",
	    HID_GET_USAGE_PAGE(tlc->usage), HID_GET_USAGE(tlc->usage),
	    devinfo->idBus, devinfo->idVendor, devinfo->idProduct,
	    devinfo->idVersion, devinfo->idPnP[0] == '\0' ? "" : " _HID=",
	    devinfo->idPnP[0] == '\0' ? "" : devinfo->idPnP);
	return (0);
}

void
hidbus_set_desc(device_t child, const char *suffix)
{
	device_t bus = device_get_parent(child);
	struct hidbus_softc *sc = device_get_softc(bus);
	struct hid_device_info *devinfo = device_get_ivars(bus);
	struct hidbus_ivars *tlc = device_get_ivars(child);
	char buf[80];

	/* Do not add a NULL suffix, or one the device name already contains. */
	if (suffix != NULL && strcasestr(devinfo->name, suffix) == NULL &&
	    (sc->nauto > 1 || (tlc->flags & HIDBUS_FLAG_AUTOCHILD) == 0)) {
		snprintf(buf, sizeof(buf), "%s %s", devinfo->name, suffix);
		device_set_desc_copy(child, buf);
	} else
		device_set_desc(child, devinfo->name);
}

device_t
hidbus_find_child(device_t bus, int32_t usage)
{
	device_t *children, child;
	int ccount, i;

	GIANT_REQUIRED;

	/* Get a list of all hidbus children. */
	if (device_get_children(bus, &children, &ccount) != 0)
		return (NULL);

	/* Scan through to find the required TLC. */
	for (i = 0, child = NULL; i < ccount; i++) {
		if (hidbus_get_usage(children[i]) == usage) {
			child = children[i];
			break;
		}
	}
	free(children, M_TEMP);

	return (child);
}

void
hidbus_intr(void *context, void *buf, hid_size_t len)
{
	struct hidbus_softc *sc = context;
	struct hidbus_ivars *tlc;
	struct epoch_tracker et;

	/*
	 * Broadcast input report to all subscribers.
	 * TODO: Add check for input report ID.
	 *
	 * Relock the mutex on every TLC item, as we can't hold any locks
	 * over the whole TLC list here due to a LOR with the open()/close()
	 * handlers.
*/ if (!HID_IN_POLLING_MODE()) epoch_enter_preempt(INPUT_EPOCH, &et); CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) { if (tlc->refcnt == 0 || tlc->intr_handler == NULL) continue; if (HID_IN_POLLING_MODE()) { if ((tlc->flags & HIDBUS_FLAG_CAN_POLL) != 0) tlc->intr_handler(tlc->intr_ctx, buf, len); } else { mtx_lock(tlc->mtx); tlc->intr_handler(tlc->intr_ctx, buf, len); mtx_unlock(tlc->mtx); } } if (!HID_IN_POLLING_MODE()) epoch_exit_preempt(INPUT_EPOCH, &et); } void hidbus_set_intr(device_t child, hid_intr_t *handler, void *context) { struct hidbus_ivars *tlc = device_get_ivars(child); tlc->intr_handler = handler; tlc->intr_ctx = context; } int hidbus_intr_start(device_t child) { device_t bus = device_get_parent(child); struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *ivar = device_get_ivars(child); struct hidbus_ivars *tlc; int refcnt = 0; int error; if (sx_xlock_sig(&sc->sx) != 0) return (EINTR); CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) { refcnt += tlc->refcnt; if (tlc == ivar) { mtx_lock(tlc->mtx); ++tlc->refcnt; mtx_unlock(tlc->mtx); } } error = refcnt != 0 ? 0 : HID_INTR_START(device_get_parent(bus)); sx_unlock(&sc->sx); return (error); } int hidbus_intr_stop(device_t child) { device_t bus = device_get_parent(child); struct hidbus_softc *sc = device_get_softc(bus); struct hidbus_ivars *ivar = device_get_ivars(child); struct hidbus_ivars *tlc; bool refcnt = 0; int error; if (sx_xlock_sig(&sc->sx) != 0) return (EINTR); CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) { if (tlc == ivar) { mtx_lock(tlc->mtx); MPASS(tlc->refcnt != 0); --tlc->refcnt; mtx_unlock(tlc->mtx); } refcnt += tlc->refcnt; } error = refcnt != 0 ? 0 : HID_INTR_STOP(device_get_parent(bus)); sx_unlock(&sc->sx); return (error); } void hidbus_intr_poll(device_t child) { device_t bus = device_get_parent(child); HID_INTR_POLL(device_get_parent(bus)); } struct hid_rdesc_info * hidbus_get_rdesc_info(device_t child) { device_t bus = device_get_parent(child); struct hidbus_softc *sc = device_get_softc(bus); return (&sc->rdesc); } /* * HID interface. * * Hidbus as well as any hidbus child can be passed as first arg. */ /* Read cached report descriptor */ int hid_get_report_descr(device_t dev, void **data, hid_size_t *len) { device_t bus; struct hidbus_softc *sc; bus = device_get_devclass(dev) == hidbus_devclass ? dev : device_get_parent(dev); sc = device_get_softc(bus); /* * Do not send request to a transport backend. * Use cached report descriptor instead of it. */ if (sc->rdesc.data == NULL || sc->rdesc.len == 0) return (ENXIO); if (data != NULL) *data = sc->rdesc.data; if (len != NULL) *len = sc->rdesc.len; return (0); } /* * Replace cached report descriptor with top level driver provided one. * * It deletes all hidbus children except caller and enumerates them again after * new descriptor has been registered. Currently it can not be called from * autoenumerated (by report's TLC) child device context as it results in child * duplication. To overcome this limitation hid_set_report_descr() should be * called from device_identify driver's handler with hidbus itself passed as * 'device_t dev' parameter. */ int hid_set_report_descr(device_t dev, const void *data, hid_size_t len) { struct hid_rdesc_info rdesc; device_t bus; struct hidbus_softc *sc; bool is_bus; int error; GIANT_REQUIRED; is_bus = device_get_devclass(dev) == hidbus_devclass; bus = is_bus ? dev : device_get_parent(dev); sc = device_get_softc(bus); /* * Do not overload already overloaded report descriptor in * device_identify handler. 
It causes infinite recursion loop. */ if (is_bus && sc->overloaded) return(0); DPRINTFN(5, "len=%d\n", len); DPRINTFN(5, "data = %*D\n", len, data, " "); error = hidbus_fill_rdesc_info(&rdesc, data, len); if (error != 0) return (error); error = hidbus_detach_children(dev); if (error != 0) return(error); /* Make private copy to handle a case of dynamicaly allocated data. */ rdesc.data = malloc(len, M_DEVBUF, M_ZERO | M_WAITOK); bcopy(data, rdesc.data, len); sc->overloaded = true; free(sc->rdesc.data, M_DEVBUF); bcopy(&rdesc, &sc->rdesc, sizeof(struct hid_rdesc_info)); error = hidbus_attach_children(bus); return (error); } static int hidbus_write(device_t dev, const void *data, hid_size_t len) { struct hidbus_softc *sc; uint8_t id; sc = device_get_softc(dev); /* * Output interrupt endpoint is often optional. If HID device * does not provide it, send reports via control pipe. */ if (sc->nowrite) { /* try to extract the ID byte */ id = (sc->rdesc.oid & (len > 0)) ? *(const uint8_t*)data : 0; return (hid_set_report(dev, data, len, HID_OUTPUT_REPORT, id)); } return (hid_write(dev, data, len)); } /*------------------------------------------------------------------------* * hidbus_lookup_id * * This functions takes an array of "struct hid_device_id" and tries * to match the entries with the information in "struct hid_device_info". * * Return values: * NULL: No match found. * Else: Pointer to matching entry. *------------------------------------------------------------------------*/ const struct hid_device_id * hidbus_lookup_id(device_t dev, const struct hid_device_id *id, int nitems_id) { const struct hid_device_id *id_end; const struct hid_device_info *info; int32_t usage; bool is_child; if (id == NULL) { goto done; } id_end = id + nitems_id; info = hid_get_device_info(dev); is_child = device_get_devclass(dev) != hidbus_devclass; if (is_child) usage = hidbus_get_usage(dev); /* * Keep on matching array entries until we find a match or * until we reach the end of the matching array: */ for (; id != id_end; id++) { if (is_child && (id->match_flag_page) && (id->page != HID_GET_USAGE_PAGE(usage))) { continue; } if (is_child && (id->match_flag_usage) && (id->usage != HID_GET_USAGE(usage))) { continue; } if ((id->match_flag_bus) && (id->idBus != info->idBus)) { continue; } if ((id->match_flag_vendor) && (id->idVendor != info->idVendor)) { continue; } if ((id->match_flag_product) && (id->idProduct != info->idProduct)) { continue; } if ((id->match_flag_ver_lo) && (id->idVersion_lo > info->idVersion)) { continue; } if ((id->match_flag_ver_hi) && (id->idVersion_hi < info->idVersion)) { continue; } if (id->match_flag_pnp && strncmp(id->idPnP, info->idPnP, HID_PNP_ID_SIZE) != 0) { continue; } /* We found a match! */ return (id); } done: return (NULL); } /*------------------------------------------------------------------------* * hidbus_lookup_driver_info - factored out code * * Return values: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ int hidbus_lookup_driver_info(device_t child, const struct hid_device_id *id, int nitems_id) { id = hidbus_lookup_id(child, id, nitems_id); if (id) { /* copy driver info */ hidbus_set_driver_info(child, id->driver_info); return (0); } return (ENXIO); } const struct hid_device_info * hid_get_device_info(device_t dev) { device_t bus; bus = device_get_devclass(dev) == hidbus_devclass ? 
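/*
 * The matching loop above is what hidbus_lookup_driver_info() drives;
 * a typical child driver declares a match table and probes with it.
 * Sketch (the "foo" driver is hypothetical; the macro and usage
 * constants are the standard HID ones):
 *
 *	static const struct hid_device_id foo_devices[] = {
 *		{ HID_TLC(HUP_GENERIC_DESKTOP, HUG_MOUSE) },
 *	};
 *
 *	static int
 *	foo_probe(device_t dev)
 *	{
 *		return (hidbus_lookup_driver_info(dev, foo_devices,
 *		    nitems(foo_devices)));
 *	}
 */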
dev : device_get_parent(dev); return (device_get_ivars(bus)); } static device_method_t hidbus_methods[] = { /* device interface */ DEVMETHOD(device_probe, hidbus_probe), DEVMETHOD(device_attach, hidbus_attach), DEVMETHOD(device_detach, hidbus_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* bus interface */ DEVMETHOD(bus_add_child, hidbus_add_child), DEVMETHOD(bus_child_detached, hidbus_child_detached), DEVMETHOD(bus_child_deleted, hidbus_child_deleted), DEVMETHOD(bus_read_ivar, hidbus_read_ivar), DEVMETHOD(bus_write_ivar, hidbus_write_ivar), - DEVMETHOD(bus_child_pnpinfo_str,hidbus_child_pnpinfo_str), - DEVMETHOD(bus_child_location_str,hidbus_child_location_str), + DEVMETHOD(bus_child_pnpinfo, hidbus_child_pnpinfo), + DEVMETHOD(bus_child_location, hidbus_child_location), /* hid interface */ DEVMETHOD(hid_get_rdesc, hid_get_rdesc), DEVMETHOD(hid_read, hid_read), DEVMETHOD(hid_write, hidbus_write), DEVMETHOD(hid_get_report, hid_get_report), DEVMETHOD(hid_set_report, hid_set_report), DEVMETHOD(hid_set_idle, hid_set_idle), DEVMETHOD(hid_set_protocol, hid_set_protocol), DEVMETHOD_END }; devclass_t hidbus_devclass; driver_t hidbus_driver = { "hidbus", hidbus_methods, sizeof(struct hidbus_softc), }; MODULE_DEPEND(hidbus, hid, 1, 1, 1); MODULE_VERSION(hidbus, 1); DRIVER_MODULE(hidbus, iichid, hidbus_driver, hidbus_devclass, 0, 0); DRIVER_MODULE(hidbus, usbhid, hidbus_driver, hidbus_devclass, 0, 0); diff --git a/sys/dev/hyperv/vmbus/vmbus.c b/sys/dev/hyperv/vmbus/vmbus.c index 929eff33e7c9..31951cbf4858 100644 --- a/sys/dev/hyperv/vmbus/vmbus.c +++ b/sys/dev/hyperv/vmbus/vmbus.c @@ -1,1681 +1,1679 @@ /*- * Copyright (c) 2009-2012,2016-2017 Microsoft Corp. * Copyright (c) 2012 NetApp Inc. * Copyright (c) 2012 Citrix Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * VM Bus Driver Implementation */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "acpi_if.h" #include "pcib_if.h" #include "vmbus_if.h" #define VMBUS_GPADL_START 0xe1e10 struct vmbus_msghc { struct vmbus_xact *mh_xact; struct hypercall_postmsg_in mh_inprm_save; }; static void vmbus_identify(driver_t *, device_t); static int vmbus_probe(device_t); static int vmbus_attach(device_t); static int vmbus_detach(device_t); static int vmbus_read_ivar(device_t, device_t, int, uintptr_t *); -static int vmbus_child_pnpinfo_str(device_t, device_t, - char *, size_t); +static int vmbus_child_pnpinfo(device_t, device_t, struct sbuf *); static struct resource *vmbus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int vmbus_alloc_msi(device_t bus, device_t dev, int count, int maxcount, int *irqs); static int vmbus_release_msi(device_t bus, device_t dev, int count, int *irqs); static int vmbus_alloc_msix(device_t bus, device_t dev, int *irq); static int vmbus_release_msix(device_t bus, device_t dev, int irq); static int vmbus_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr, uint32_t *data); static uint32_t vmbus_get_version_method(device_t, device_t); static int vmbus_probe_guid_method(device_t, device_t, const struct hyperv_guid *); static uint32_t vmbus_get_vcpu_id_method(device_t bus, device_t dev, int cpu); static struct taskqueue *vmbus_get_eventtq_method(device_t, device_t, int); #ifdef EARLY_AP_STARTUP static void vmbus_intrhook(void *); #endif static int vmbus_init(struct vmbus_softc *); static int vmbus_connect(struct vmbus_softc *, uint32_t); static int vmbus_req_channels(struct vmbus_softc *sc); static void vmbus_disconnect(struct vmbus_softc *); static int vmbus_scan(struct vmbus_softc *); static void vmbus_scan_teardown(struct vmbus_softc *); static void vmbus_scan_done(struct vmbus_softc *, const struct vmbus_message *); static void vmbus_chanmsg_handle(struct vmbus_softc *, const struct vmbus_message *); static void vmbus_msg_task(void *, int); static void vmbus_synic_setup(void *); static void vmbus_synic_teardown(void *); static int vmbus_sysctl_version(SYSCTL_HANDLER_ARGS); static int vmbus_dma_alloc(struct vmbus_softc *); static void vmbus_dma_free(struct vmbus_softc *); static int vmbus_intr_setup(struct vmbus_softc *); static void vmbus_intr_teardown(struct vmbus_softc *); static int vmbus_doattach(struct vmbus_softc *); static void vmbus_event_proc_dummy(struct vmbus_softc *, int); static struct vmbus_softc *vmbus_sc; SYSCTL_NODE(_hw, OID_AUTO, vmbus, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Hyper-V vmbus"); static int vmbus_pin_evttask = 1; SYSCTL_INT(_hw_vmbus, OID_AUTO, pin_evttask, CTLFLAG_RDTUN, &vmbus_pin_evttask, 0, "Pin event tasks to their respective CPU"); extern inthand_t IDTVEC(vmbus_isr), IDTVEC(vmbus_isr_pti); #define VMBUS_ISR_ADDR trunc_page((uintptr_t)IDTVEC(vmbus_isr_pti)) uint32_t vmbus_current_version; static const uint32_t vmbus_version[] = { VMBUS_VERSION_WIN10, VMBUS_VERSION_WIN8_1, VMBUS_VERSION_WIN8, VMBUS_VERSION_WIN7, VMBUS_VERSION_WS2008 }; static const vmbus_chanmsg_proc_t vmbus_chanmsg_handlers[VMBUS_CHANMSG_TYPE_MAX] = { VMBUS_CHANMSG_PROC(CHOFFER_DONE, vmbus_scan_done), 
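/*
 * vmbus_child_pnpinfo(), declared above, follows the new sbuf-based bus
 * method contract used throughout this change: the method appends
 * key=value pairs to a caller-supplied sbuf instead of filling a
 * fixed-size string.  A minimal implementation of such a method looks
 * like this (hypothetical bus, illustrative field names):
 *
 *	static int
 *	foo_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb)
 *	{
 *		struct foo_ivars *ivars = device_get_ivars(child);
 *
 *		sbuf_printf(sb, "vendor=0x%04x product=0x%04x",
 *		    ivars->vendor, ivars->product);
 *		return (0);
 *	}
 */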
VMBUS_CHANMSG_PROC_WAKEUP(CONNECT_RESP) }; static device_method_t vmbus_methods[] = { /* Device interface */ DEVMETHOD(device_identify, vmbus_identify), DEVMETHOD(device_probe, vmbus_probe), DEVMETHOD(device_attach, vmbus_attach), DEVMETHOD(device_detach, vmbus_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_read_ivar, vmbus_read_ivar), - DEVMETHOD(bus_child_pnpinfo_str, vmbus_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, vmbus_child_pnpinfo), DEVMETHOD(bus_alloc_resource, vmbus_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), #if __FreeBSD_version >= 1100000 DEVMETHOD(bus_get_cpus, bus_generic_get_cpus), #endif /* pcib interface */ DEVMETHOD(pcib_alloc_msi, vmbus_alloc_msi), DEVMETHOD(pcib_release_msi, vmbus_release_msi), DEVMETHOD(pcib_alloc_msix, vmbus_alloc_msix), DEVMETHOD(pcib_release_msix, vmbus_release_msix), DEVMETHOD(pcib_map_msi, vmbus_map_msi), /* Vmbus interface */ DEVMETHOD(vmbus_get_version, vmbus_get_version_method), DEVMETHOD(vmbus_probe_guid, vmbus_probe_guid_method), DEVMETHOD(vmbus_get_vcpu_id, vmbus_get_vcpu_id_method), DEVMETHOD(vmbus_get_event_taskq, vmbus_get_eventtq_method), DEVMETHOD_END }; static driver_t vmbus_driver = { "vmbus", vmbus_methods, sizeof(struct vmbus_softc) }; static devclass_t vmbus_devclass; DRIVER_MODULE(vmbus, pcib, vmbus_driver, vmbus_devclass, NULL, NULL); DRIVER_MODULE(vmbus, acpi_syscontainer, vmbus_driver, vmbus_devclass, NULL, NULL); MODULE_DEPEND(vmbus, acpi, 1, 1, 1); MODULE_DEPEND(vmbus, pci, 1, 1, 1); MODULE_VERSION(vmbus, 1); static __inline struct vmbus_softc * vmbus_get_softc(void) { return vmbus_sc; } void vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize) { struct hypercall_postmsg_in *inprm; if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX) panic("invalid data size %zu", dsize); inprm = vmbus_xact_req_data(mh->mh_xact); memset(inprm, 0, HYPERCALL_POSTMSGIN_SIZE); inprm->hc_connid = VMBUS_CONNID_MESSAGE; inprm->hc_msgtype = HYPERV_MSGTYPE_CHANNEL; inprm->hc_dsize = dsize; } struct vmbus_msghc * vmbus_msghc_get(struct vmbus_softc *sc, size_t dsize) { struct vmbus_msghc *mh; struct vmbus_xact *xact; if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX) panic("invalid data size %zu", dsize); xact = vmbus_xact_get(sc->vmbus_xc, dsize + __offsetof(struct hypercall_postmsg_in, hc_data[0])); if (xact == NULL) return (NULL); mh = vmbus_xact_priv(xact, sizeof(*mh)); mh->mh_xact = xact; vmbus_msghc_reset(mh, dsize); return (mh); } void vmbus_msghc_put(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) { vmbus_xact_put(mh->mh_xact); } void * vmbus_msghc_dataptr(struct vmbus_msghc *mh) { struct hypercall_postmsg_in *inprm; inprm = vmbus_xact_req_data(mh->mh_xact); return (inprm->hc_data); } int vmbus_msghc_exec_noresult(struct vmbus_msghc *mh) { sbintime_t time = SBT_1MS; struct hypercall_postmsg_in *inprm; bus_addr_t inprm_paddr; int i; inprm = vmbus_xact_req_data(mh->mh_xact); inprm_paddr = vmbus_xact_req_paddr(mh->mh_xact); /* * Save the input parameter so that we could restore the input * parameter if the Hypercall failed. 
* * XXX * Is this really necessary?! i.e. Will the Hypercall ever * overwrite the input parameter? */ memcpy(&mh->mh_inprm_save, inprm, HYPERCALL_POSTMSGIN_SIZE); /* * In order to cope with transient failures, e.g. insufficient * resources on host side, we retry the post message Hypercall * several times. 20 retries seem sufficient. */ #define HC_RETRY_MAX 20 for (i = 0; i < HC_RETRY_MAX; ++i) { uint64_t status; status = hypercall_post_message(inprm_paddr); if (status == HYPERCALL_STATUS_SUCCESS) return 0; pause_sbt("hcpmsg", time, 0, C_HARDCLOCK); if (time < SBT_1S * 2) time *= 2; /* Restore input parameter and try again */ memcpy(inprm, &mh->mh_inprm_save, HYPERCALL_POSTMSGIN_SIZE); } #undef HC_RETRY_MAX return EIO; } int vmbus_msghc_exec(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) { int error; vmbus_xact_activate(mh->mh_xact); error = vmbus_msghc_exec_noresult(mh); if (error) vmbus_xact_deactivate(mh->mh_xact); return error; } void vmbus_msghc_exec_cancel(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) { vmbus_xact_deactivate(mh->mh_xact); } const struct vmbus_message * vmbus_msghc_wait_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) { size_t resp_len; return (vmbus_xact_wait(mh->mh_xact, &resp_len)); } const struct vmbus_message * vmbus_msghc_poll_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) { size_t resp_len; return (vmbus_xact_poll(mh->mh_xact, &resp_len)); } void vmbus_msghc_wakeup(struct vmbus_softc *sc, const struct vmbus_message *msg) { vmbus_xact_ctx_wakeup(sc->vmbus_xc, msg, sizeof(*msg)); } uint32_t vmbus_gpadl_alloc(struct vmbus_softc *sc) { uint32_t gpadl; again: gpadl = atomic_fetchadd_int(&sc->vmbus_gpadl, 1); if (gpadl == 0) goto again; return (gpadl); } /* Used for Hyper-V socket when guest client connects to host */ int vmbus_req_tl_connect(struct hyperv_guid *guest_srv_id, struct hyperv_guid *host_srv_id) { struct vmbus_softc *sc = vmbus_get_softc(); struct vmbus_chanmsg_tl_connect *req; struct vmbus_msghc *mh; int error; if (!sc) return ENXIO; mh = vmbus_msghc_get(sc, sizeof(*req)); if (mh == NULL) { device_printf(sc->vmbus_dev, "can not get msg hypercall for tl connect\n"); return ENXIO; } req = vmbus_msghc_dataptr(mh); req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_TL_CONN; req->guest_endpoint_id = *guest_srv_id; req->host_service_id = *host_srv_id; error = vmbus_msghc_exec_noresult(mh); vmbus_msghc_put(sc, mh); if (error) { device_printf(sc->vmbus_dev, "tl connect msg hypercall failed\n"); } return error; } static int vmbus_connect(struct vmbus_softc *sc, uint32_t version) { struct vmbus_chanmsg_connect *req; const struct vmbus_message *msg; struct vmbus_msghc *mh; int error, done = 0; mh = vmbus_msghc_get(sc, sizeof(*req)); if (mh == NULL) return ENXIO; req = vmbus_msghc_dataptr(mh); req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CONNECT; req->chm_ver = version; req->chm_evtflags = sc->vmbus_evtflags_dma.hv_paddr; req->chm_mnf1 = sc->vmbus_mnf1_dma.hv_paddr; req->chm_mnf2 = sc->vmbus_mnf2_dma.hv_paddr; error = vmbus_msghc_exec(sc, mh); if (error) { vmbus_msghc_put(sc, mh); return error; } msg = vmbus_msghc_wait_result(sc, mh); done = ((const struct vmbus_chanmsg_connect_resp *) msg->msg_data)->chm_done; vmbus_msghc_put(sc, mh); return (done ? 
0 : EOPNOTSUPP); } static int vmbus_init(struct vmbus_softc *sc) { int i; for (i = 0; i < nitems(vmbus_version); ++i) { int error; error = vmbus_connect(sc, vmbus_version[i]); if (!error) { vmbus_current_version = vmbus_version[i]; sc->vmbus_version = vmbus_version[i]; device_printf(sc->vmbus_dev, "version %u.%u\n", VMBUS_VERSION_MAJOR(sc->vmbus_version), VMBUS_VERSION_MINOR(sc->vmbus_version)); return 0; } } return ENXIO; } static void vmbus_disconnect(struct vmbus_softc *sc) { struct vmbus_chanmsg_disconnect *req; struct vmbus_msghc *mh; int error; mh = vmbus_msghc_get(sc, sizeof(*req)); if (mh == NULL) { device_printf(sc->vmbus_dev, "can not get msg hypercall for disconnect\n"); return; } req = vmbus_msghc_dataptr(mh); req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_DISCONNECT; error = vmbus_msghc_exec_noresult(mh); vmbus_msghc_put(sc, mh); if (error) { device_printf(sc->vmbus_dev, "disconnect msg hypercall failed\n"); } } static int vmbus_req_channels(struct vmbus_softc *sc) { struct vmbus_chanmsg_chrequest *req; struct vmbus_msghc *mh; int error; mh = vmbus_msghc_get(sc, sizeof(*req)); if (mh == NULL) return ENXIO; req = vmbus_msghc_dataptr(mh); req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHREQUEST; error = vmbus_msghc_exec_noresult(mh); vmbus_msghc_put(sc, mh); return error; } static void vmbus_scan_done_task(void *xsc, int pending __unused) { struct vmbus_softc *sc = xsc; mtx_lock(&Giant); sc->vmbus_scandone = true; mtx_unlock(&Giant); wakeup(&sc->vmbus_scandone); } static void vmbus_scan_done(struct vmbus_softc *sc, const struct vmbus_message *msg __unused) { taskqueue_enqueue(sc->vmbus_devtq, &sc->vmbus_scandone_task); } static int vmbus_scan(struct vmbus_softc *sc) { int error; /* * Identify, probe and attach for non-channel devices. */ bus_generic_probe(sc->vmbus_dev); bus_generic_attach(sc->vmbus_dev); /* * This taskqueue serializes vmbus devices' attach and detach * for channel offer and rescind messages. */ sc->vmbus_devtq = taskqueue_create("vmbus dev", M_WAITOK, taskqueue_thread_enqueue, &sc->vmbus_devtq); taskqueue_start_threads(&sc->vmbus_devtq, 1, PI_NET, "vmbusdev"); TASK_INIT(&sc->vmbus_scandone_task, 0, vmbus_scan_done_task, sc); /* * This taskqueue handles sub-channel detach, so that vmbus * device's detach running in vmbus_devtq can drain its sub- * channels. */ sc->vmbus_subchtq = taskqueue_create("vmbus subch", M_WAITOK, taskqueue_thread_enqueue, &sc->vmbus_subchtq); taskqueue_start_threads(&sc->vmbus_subchtq, 1, PI_NET, "vmbussch"); /* * Start vmbus scanning. */ error = vmbus_req_channels(sc); if (error) { device_printf(sc->vmbus_dev, "channel request failed: %d\n", error); return (error); } /* * Wait for all vmbus devices from the initial channel offers to be * attached. 
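 * The wakeup side is vmbus_scan_done_task() above: the CHOFFER_DONE
 * handler enqueues it on vmbus_devtq, and it sets vmbus_scandone under
 * Giant before calling wakeup() on the address we mtx_sleep() on below.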
*/ GIANT_REQUIRED; while (!sc->vmbus_scandone) mtx_sleep(&sc->vmbus_scandone, &Giant, 0, "vmbusdev", 0); if (bootverbose) { device_printf(sc->vmbus_dev, "device scan, probe and attach " "done\n"); } return (0); } static void vmbus_scan_teardown(struct vmbus_softc *sc) { GIANT_REQUIRED; if (sc->vmbus_devtq != NULL) { mtx_unlock(&Giant); taskqueue_free(sc->vmbus_devtq); mtx_lock(&Giant); sc->vmbus_devtq = NULL; } if (sc->vmbus_subchtq != NULL) { mtx_unlock(&Giant); taskqueue_free(sc->vmbus_subchtq); mtx_lock(&Giant); sc->vmbus_subchtq = NULL; } } static void vmbus_chanmsg_handle(struct vmbus_softc *sc, const struct vmbus_message *msg) { vmbus_chanmsg_proc_t msg_proc; uint32_t msg_type; msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type; if (msg_type >= VMBUS_CHANMSG_TYPE_MAX) { device_printf(sc->vmbus_dev, "unknown message type 0x%x\n", msg_type); return; } msg_proc = vmbus_chanmsg_handlers[msg_type]; if (msg_proc != NULL) msg_proc(sc, msg); /* Channel specific processing */ vmbus_chan_msgproc(sc, msg); } static void vmbus_msg_task(void *xsc, int pending __unused) { struct vmbus_softc *sc = xsc; volatile struct vmbus_message *msg; msg = VMBUS_PCPU_GET(sc, message, curcpu) + VMBUS_SINT_MESSAGE; for (;;) { if (msg->msg_type == HYPERV_MSGTYPE_NONE) { /* No message */ break; } else if (msg->msg_type == HYPERV_MSGTYPE_CHANNEL) { /* Channel message */ vmbus_chanmsg_handle(sc, __DEVOLATILE(const struct vmbus_message *, msg)); } msg->msg_type = HYPERV_MSGTYPE_NONE; /* * Make sure the write to msg_type (i.e. set to * HYPERV_MSGTYPE_NONE) happens before we read the * msg_flags and EOMing. Otherwise, the EOMing will * not deliver any more messages since there is no * empty slot * * NOTE: * mb() is used here, since atomic_thread_fence_seq_cst() * will become compiler fence on UP kernel. */ mb(); if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) { /* * This will cause message queue rescan to possibly * deliver another msg from the hypervisor */ wrmsr(MSR_HV_EOM, 0); } } } static __inline int vmbus_handle_intr1(struct vmbus_softc *sc, struct trapframe *frame, int cpu) { volatile struct vmbus_message *msg; struct vmbus_message *msg_base; msg_base = VMBUS_PCPU_GET(sc, message, cpu); /* * Check event timer. * * TODO: move this to independent IDT vector. */ msg = msg_base + VMBUS_SINT_TIMER; if (msg->msg_type == HYPERV_MSGTYPE_TIMER_EXPIRED) { msg->msg_type = HYPERV_MSGTYPE_NONE; vmbus_et_intr(frame); /* * Make sure the write to msg_type (i.e. set to * HYPERV_MSGTYPE_NONE) happens before we read the * msg_flags and EOMing. Otherwise, the EOMing will * not deliver any more messages since there is no * empty slot * * NOTE: * mb() is used here, since atomic_thread_fence_seq_cst() * will become compiler fence on UP kernel. */ mb(); if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) { /* * This will cause message queue rescan to possibly * deliver another msg from the hypervisor */ wrmsr(MSR_HV_EOM, 0); } } /* * Check events. Hot path for network and storage I/O data; high rate. * * NOTE: * As recommended by the Windows guest fellows, we check events before * checking messages. */ sc->vmbus_event_proc(sc, cpu); /* * Check messages. Mainly management stuffs; ultra low rate. 
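 * They are deferred to the per-cpu message taskqueue below instead of
 * being handled in the interrupt path.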
*/ msg = msg_base + VMBUS_SINT_MESSAGE; if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) { taskqueue_enqueue(VMBUS_PCPU_GET(sc, message_tq, cpu), VMBUS_PCPU_PTR(sc, message_task, cpu)); } return (FILTER_HANDLED); } void vmbus_handle_intr(struct trapframe *trap_frame) { struct vmbus_softc *sc = vmbus_get_softc(); int cpu = curcpu; /* * Disable preemption. */ critical_enter(); /* * Do a little interrupt counting. */ (*VMBUS_PCPU_GET(sc, intr_cnt, cpu))++; vmbus_handle_intr1(sc, trap_frame, cpu); /* * Enable preemption. */ critical_exit(); } static void vmbus_synic_setup(void *xsc) { struct vmbus_softc *sc = xsc; int cpu = curcpu; uint64_t val, orig; uint32_t sint; if (hyperv_features & CPUID_HV_MSR_VP_INDEX) { /* Save virtual processor id. */ VMBUS_PCPU_GET(sc, vcpuid, cpu) = rdmsr(MSR_HV_VP_INDEX); } else { /* Set virtual processor id to 0 for compatibility. */ VMBUS_PCPU_GET(sc, vcpuid, cpu) = 0; } /* * Setup the SynIC message. */ orig = rdmsr(MSR_HV_SIMP); val = MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK) | ((VMBUS_PCPU_GET(sc, message_dma.hv_paddr, cpu) >> PAGE_SHIFT) << MSR_HV_SIMP_PGSHIFT); wrmsr(MSR_HV_SIMP, val); /* * Setup the SynIC event flags. */ orig = rdmsr(MSR_HV_SIEFP); val = MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK) | ((VMBUS_PCPU_GET(sc, event_flags_dma.hv_paddr, cpu) >> PAGE_SHIFT) << MSR_HV_SIEFP_PGSHIFT); wrmsr(MSR_HV_SIEFP, val); /* * Configure and unmask SINT for message and event flags. */ sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE; orig = rdmsr(sint); val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI | (orig & MSR_HV_SINT_RSVD_MASK); wrmsr(sint, val); /* * Configure and unmask SINT for timer. */ sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER; orig = rdmsr(sint); val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI | (orig & MSR_HV_SINT_RSVD_MASK); wrmsr(sint, val); /* * All done; enable SynIC. */ orig = rdmsr(MSR_HV_SCONTROL); val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK); wrmsr(MSR_HV_SCONTROL, val); } static void vmbus_synic_teardown(void *arg) { uint64_t orig; uint32_t sint; /* * Disable SynIC. */ orig = rdmsr(MSR_HV_SCONTROL); wrmsr(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK)); /* * Mask message and event flags SINT. */ sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE; orig = rdmsr(sint); wrmsr(sint, orig | MSR_HV_SINT_MASKED); /* * Mask timer SINT. */ sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER; orig = rdmsr(sint); wrmsr(sint, orig | MSR_HV_SINT_MASKED); /* * Teardown SynIC message. */ orig = rdmsr(MSR_HV_SIMP); wrmsr(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK)); /* * Teardown SynIC event flags. */ orig = rdmsr(MSR_HV_SIEFP); wrmsr(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK)); } static int vmbus_dma_alloc(struct vmbus_softc *sc) { bus_dma_tag_t parent_dtag; uint8_t *evtflags; int cpu; parent_dtag = bus_get_dma_tag(sc->vmbus_dev); CPU_FOREACH(cpu) { void *ptr; /* * Per-cpu messages and event flags. 
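 * Each area is one page and must be page-aligned: vmbus_synic_setup()
 * above programs its page frame number (hence the PAGE_SHIFT shifts)
 * into the MSR_HV_SIMP and MSR_HV_SIEFP MSRs.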
*/ ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, PAGE_SIZE, VMBUS_PCPU_PTR(sc, message_dma, cpu), BUS_DMA_WAITOK | BUS_DMA_ZERO); if (ptr == NULL) return ENOMEM; VMBUS_PCPU_GET(sc, message, cpu) = ptr; ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, PAGE_SIZE, VMBUS_PCPU_PTR(sc, event_flags_dma, cpu), BUS_DMA_WAITOK | BUS_DMA_ZERO); if (ptr == NULL) return ENOMEM; VMBUS_PCPU_GET(sc, event_flags, cpu) = ptr; } evtflags = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, PAGE_SIZE, &sc->vmbus_evtflags_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO); if (evtflags == NULL) return ENOMEM; sc->vmbus_rx_evtflags = (u_long *)evtflags; sc->vmbus_tx_evtflags = (u_long *)(evtflags + (PAGE_SIZE / 2)); sc->vmbus_evtflags = evtflags; sc->vmbus_mnf1 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, PAGE_SIZE, &sc->vmbus_mnf1_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO); if (sc->vmbus_mnf1 == NULL) return ENOMEM; sc->vmbus_mnf2 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, sizeof(struct vmbus_mnf), &sc->vmbus_mnf2_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO); if (sc->vmbus_mnf2 == NULL) return ENOMEM; return 0; } static void vmbus_dma_free(struct vmbus_softc *sc) { int cpu; if (sc->vmbus_evtflags != NULL) { hyperv_dmamem_free(&sc->vmbus_evtflags_dma, sc->vmbus_evtflags); sc->vmbus_evtflags = NULL; sc->vmbus_rx_evtflags = NULL; sc->vmbus_tx_evtflags = NULL; } if (sc->vmbus_mnf1 != NULL) { hyperv_dmamem_free(&sc->vmbus_mnf1_dma, sc->vmbus_mnf1); sc->vmbus_mnf1 = NULL; } if (sc->vmbus_mnf2 != NULL) { hyperv_dmamem_free(&sc->vmbus_mnf2_dma, sc->vmbus_mnf2); sc->vmbus_mnf2 = NULL; } CPU_FOREACH(cpu) { if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) { hyperv_dmamem_free( VMBUS_PCPU_PTR(sc, message_dma, cpu), VMBUS_PCPU_GET(sc, message, cpu)); VMBUS_PCPU_GET(sc, message, cpu) = NULL; } if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) { hyperv_dmamem_free( VMBUS_PCPU_PTR(sc, event_flags_dma, cpu), VMBUS_PCPU_GET(sc, event_flags, cpu)); VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL; } } } static int vmbus_intr_setup(struct vmbus_softc *sc) { int cpu; CPU_FOREACH(cpu) { char buf[MAXCOMLEN + 1]; cpuset_t cpu_mask; /* Allocate an interrupt counter for Hyper-V interrupt */ snprintf(buf, sizeof(buf), "cpu%d:hyperv", cpu); intrcnt_add(buf, VMBUS_PCPU_PTR(sc, intr_cnt, cpu)); /* * Setup taskqueue to handle events. Task will be per- * channel. */ VMBUS_PCPU_GET(sc, event_tq, cpu) = taskqueue_create_fast( "hyperv event", M_WAITOK, taskqueue_thread_enqueue, VMBUS_PCPU_PTR(sc, event_tq, cpu)); if (vmbus_pin_evttask) { CPU_SETOF(cpu, &cpu_mask); taskqueue_start_threads_cpuset( VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET, &cpu_mask, "hvevent%d", cpu); } else { taskqueue_start_threads( VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET, "hvevent%d", cpu); } /* * Setup tasks and taskqueues to handle messages. */ VMBUS_PCPU_GET(sc, message_tq, cpu) = taskqueue_create_fast( "hyperv msg", M_WAITOK, taskqueue_thread_enqueue, VMBUS_PCPU_PTR(sc, message_tq, cpu)); CPU_SETOF(cpu, &cpu_mask); taskqueue_start_threads_cpuset( VMBUS_PCPU_PTR(sc, message_tq, cpu), 1, PI_NET, &cpu_mask, "hvmsg%d", cpu); TASK_INIT(VMBUS_PCPU_PTR(sc, message_task, cpu), 0, vmbus_msg_task, sc); } #if defined(__amd64__) && defined(KLD_MODULE) pmap_pti_add_kva(VMBUS_ISR_ADDR, VMBUS_ISR_ADDR + PAGE_SIZE, true); #endif /* * All Hyper-V ISR required resources are setup, now let's find a * free IDT vector for Hyper-V ISR and set it up. */ sc->vmbus_idtvec = lapic_ipi_alloc(pti ? 
IDTVEC(vmbus_isr_pti) : IDTVEC(vmbus_isr)); if (sc->vmbus_idtvec < 0) { #if defined(__amd64__) && defined(KLD_MODULE) pmap_pti_remove_kva(VMBUS_ISR_ADDR, VMBUS_ISR_ADDR + PAGE_SIZE); #endif device_printf(sc->vmbus_dev, "cannot find free IDT vector\n"); return ENXIO; } if (bootverbose) { device_printf(sc->vmbus_dev, "vmbus IDT vector %d\n", sc->vmbus_idtvec); } return 0; } static void vmbus_intr_teardown(struct vmbus_softc *sc) { int cpu; if (sc->vmbus_idtvec >= 0) { lapic_ipi_free(sc->vmbus_idtvec); sc->vmbus_idtvec = -1; } #if defined(__amd64__) && defined(KLD_MODULE) pmap_pti_remove_kva(VMBUS_ISR_ADDR, VMBUS_ISR_ADDR + PAGE_SIZE); #endif CPU_FOREACH(cpu) { if (VMBUS_PCPU_GET(sc, event_tq, cpu) != NULL) { taskqueue_free(VMBUS_PCPU_GET(sc, event_tq, cpu)); VMBUS_PCPU_GET(sc, event_tq, cpu) = NULL; } if (VMBUS_PCPU_GET(sc, message_tq, cpu) != NULL) { taskqueue_drain(VMBUS_PCPU_GET(sc, message_tq, cpu), VMBUS_PCPU_PTR(sc, message_task, cpu)); taskqueue_free(VMBUS_PCPU_GET(sc, message_tq, cpu)); VMBUS_PCPU_GET(sc, message_tq, cpu) = NULL; } } } static int vmbus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { return (ENOENT); } static int -vmbus_child_pnpinfo_str(device_t dev, device_t child, char *buf, size_t buflen) +vmbus_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb) { const struct vmbus_channel *chan; char guidbuf[HYPERV_GUID_STRLEN]; chan = vmbus_get_channel(child); if (chan == NULL) { /* Event timer device, which does not belong to a channel */ return (0); } - strlcat(buf, "classid=", buflen); hyperv_guid2str(&chan->ch_guid_type, guidbuf, sizeof(guidbuf)); - strlcat(buf, guidbuf, buflen); + sbuf_printf(sb, "classid=%s", guidbuf); - strlcat(buf, " deviceid=", buflen); hyperv_guid2str(&chan->ch_guid_inst, guidbuf, sizeof(guidbuf)); - strlcat(buf, guidbuf, buflen); + sbuf_printf(sb, " deviceid=%s", guidbuf); return (0); } int vmbus_add_child(struct vmbus_channel *chan) { struct vmbus_softc *sc = chan->ch_vmbus; device_t parent = sc->vmbus_dev; mtx_lock(&Giant); chan->ch_dev = device_add_child(parent, NULL, -1); if (chan->ch_dev == NULL) { mtx_unlock(&Giant); device_printf(parent, "device_add_child for chan%u failed\n", chan->ch_id); return (ENXIO); } device_set_ivars(chan->ch_dev, chan); device_probe_and_attach(chan->ch_dev); mtx_unlock(&Giant); return (0); } int vmbus_delete_child(struct vmbus_channel *chan) { int error = 0; mtx_lock(&Giant); if (chan->ch_dev != NULL) { error = device_delete_child(chan->ch_vmbus->vmbus_dev, chan->ch_dev); chan->ch_dev = NULL; } mtx_unlock(&Giant); return (error); } static int vmbus_sysctl_version(SYSCTL_HANDLER_ARGS) { struct vmbus_softc *sc = arg1; char verstr[16]; snprintf(verstr, sizeof(verstr), "%u.%u", VMBUS_VERSION_MAJOR(sc->vmbus_version), VMBUS_VERSION_MINOR(sc->vmbus_version)); return sysctl_handle_string(oidp, verstr, sizeof(verstr), req); } /* * We need the function to make sure the MMIO resource is allocated from the * ranges found in _CRS. * * For the release function, we can use bus_generic_release_resource(). 
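 * (NEW_PCIB only; without it the request is simply passed up to the
 * parent via BUS_ALLOC_RESOURCE().)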
*/ static struct resource * vmbus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { device_t parent = device_get_parent(dev); struct resource *res; #ifdef NEW_PCIB if (type == SYS_RES_MEMORY) { struct vmbus_softc *sc = device_get_softc(dev); res = pcib_host_res_alloc(&sc->vmbus_mmio_res, child, type, rid, start, end, count, flags); } else #endif { res = BUS_ALLOC_RESOURCE(parent, child, type, rid, start, end, count, flags); } return (res); } static int vmbus_alloc_msi(device_t bus, device_t dev, int count, int maxcount, int *irqs) { return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, irqs)); } static int vmbus_release_msi(device_t bus, device_t dev, int count, int *irqs) { return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs)); } static int vmbus_alloc_msix(device_t bus, device_t dev, int *irq) { return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); } static int vmbus_release_msix(device_t bus, device_t dev, int irq) { return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq)); } static int vmbus_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr, uint32_t *data) { return (PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data)); } static uint32_t vmbus_get_version_method(device_t bus, device_t dev) { struct vmbus_softc *sc = device_get_softc(bus); return sc->vmbus_version; } static int vmbus_probe_guid_method(device_t bus, device_t dev, const struct hyperv_guid *guid) { const struct vmbus_channel *chan = vmbus_get_channel(dev); if (memcmp(&chan->ch_guid_type, guid, sizeof(struct hyperv_guid)) == 0) return 0; return ENXIO; } static uint32_t vmbus_get_vcpu_id_method(device_t bus, device_t dev, int cpu) { const struct vmbus_softc *sc = device_get_softc(bus); return (VMBUS_PCPU_GET(sc, vcpuid, cpu)); } static struct taskqueue * vmbus_get_eventtq_method(device_t bus, device_t dev __unused, int cpu) { const struct vmbus_softc *sc = device_get_softc(bus); KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu%d", cpu)); return (VMBUS_PCPU_GET(sc, event_tq, cpu)); } #ifdef NEW_PCIB #define VTPM_BASE_ADDR 0xfed40000 #define FOUR_GB (1ULL << 32) enum parse_pass { parse_64, parse_32 }; struct parse_context { device_t vmbus_dev; enum parse_pass pass; }; static ACPI_STATUS parse_crs(ACPI_RESOURCE *res, void *ctx) { const struct parse_context *pc = ctx; device_t vmbus_dev = pc->vmbus_dev; struct vmbus_softc *sc = device_get_softc(vmbus_dev); UINT64 start, end; switch (res->Type) { case ACPI_RESOURCE_TYPE_ADDRESS32: start = res->Data.Address32.Address.Minimum; end = res->Data.Address32.Address.Maximum; break; case ACPI_RESOURCE_TYPE_ADDRESS64: start = res->Data.Address64.Address.Minimum; end = res->Data.Address64.Address.Maximum; break; default: /* Unused types. */ return (AE_OK); } /* * We don't use <1MB addresses. */ if (end < 0x100000) return (AE_OK); /* Don't conflict with vTPM. 
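 * A range straddling VTPM_BASE_ADDR is clamped so that it ends just
 * below the vTPM window.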
*/ if (end >= VTPM_BASE_ADDR && start < VTPM_BASE_ADDR) end = VTPM_BASE_ADDR - 1; if ((pc->pass == parse_32 && start < FOUR_GB) || (pc->pass == parse_64 && start >= FOUR_GB)) pcib_host_res_decodes(&sc->vmbus_mmio_res, SYS_RES_MEMORY, start, end, 0); return (AE_OK); } static void vmbus_get_crs(device_t dev, device_t vmbus_dev, enum parse_pass pass) { struct parse_context pc; ACPI_STATUS status; if (bootverbose) device_printf(dev, "walking _CRS, pass=%d\n", pass); pc.vmbus_dev = vmbus_dev; pc.pass = pass; status = AcpiWalkResources(acpi_get_handle(dev), "_CRS", parse_crs, &pc); if (bootverbose && ACPI_FAILURE(status)) device_printf(dev, "_CRS: not found, pass=%d\n", pass); } static void vmbus_get_mmio_res_pass(device_t dev, enum parse_pass pass) { device_t acpi0, parent; parent = device_get_parent(dev); acpi0 = device_get_parent(parent); if (strcmp("acpi0", device_get_nameunit(acpi0)) == 0) { device_t *children; int count; /* * Try to locate VMBUS resources and find _CRS on them. */ if (device_get_children(acpi0, &children, &count) == 0) { int i; for (i = 0; i < count; ++i) { if (!device_is_attached(children[i])) continue; if (strcmp("vmbus_res", device_get_name(children[i])) == 0) vmbus_get_crs(children[i], dev, pass); } free(children, M_TEMP); } /* * Try to find _CRS on acpi. */ vmbus_get_crs(acpi0, dev, pass); } else { device_printf(dev, "not grandchild of acpi\n"); } /* * Try to find _CRS on the parent. */ vmbus_get_crs(parent, dev, pass); } static void vmbus_get_mmio_res(device_t dev) { struct vmbus_softc *sc = device_get_softc(dev); /* * We walk the resources twice to make sure that, in the resource * list, the 32-bit resources appear behind the 64-bit resources. * NB: resource_list_add() uses INSERT_TAIL. This way, when we * iterate through the list to find a range for a 64-bit BAR in * vmbus_alloc_resource(), we can make sure we try to use >4GB * ranges first. */ pcib_host_res_init(dev, &sc->vmbus_mmio_res); vmbus_get_mmio_res_pass(dev, parse_64); vmbus_get_mmio_res_pass(dev, parse_32); } /* * On Gen2 VMs, Hyper-V provides MMIO space for the framebuffer. * This MMIO address range is not usable by other PCI devices. * Currently only the efifb and vbefb drivers use this range without * reserving it from the system. * Therefore, the vmbus driver reserves it before any other PCI device * drivers start to request MMIO addresses. */ static struct resource *hv_fb_res; static void vmbus_fb_mmio_res(device_t dev) { struct efi_fb *efifb; struct vbe_fb *vbefb; rman_res_t fb_start, fb_end, fb_count; int fb_height, fb_width; caddr_t kmdp; struct vmbus_softc *sc = device_get_softc(dev); int rid = 0; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); efifb = (struct efi_fb *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_FB); vbefb = (struct vbe_fb *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_VBE_FB); if (efifb != NULL) { fb_start = efifb->fb_addr; fb_end = efifb->fb_addr + efifb->fb_size; fb_count = efifb->fb_size; fb_height = efifb->fb_height; fb_width = efifb->fb_width; } else if (vbefb != NULL) { fb_start = vbefb->fb_addr; fb_end = vbefb->fb_addr + vbefb->fb_size; fb_count = vbefb->fb_size; fb_height = vbefb->fb_height; fb_width = vbefb->fb_width; } else { if (bootverbose) device_printf(dev, "no preloaded kernel fb information\n"); /* We are on a Gen1 VM, just return.
*/ return; } if (bootverbose) device_printf(dev, "fb: fb_addr: %#jx, size: %#jx, " "actual size needed: 0x%x\n", fb_start, fb_count, fb_height * fb_width); hv_fb_res = pcib_host_res_alloc(&sc->vmbus_mmio_res, dev, SYS_RES_MEMORY, &rid, fb_start, fb_end, fb_count, RF_ACTIVE | rman_make_alignment_flags(PAGE_SIZE)); if (hv_fb_res && bootverbose) device_printf(dev, "successfully reserved memory for framebuffer " "starting at %#jx, size %#jx\n", fb_start, fb_count); } static void vmbus_free_mmio_res(device_t dev) { struct vmbus_softc *sc = device_get_softc(dev); pcib_host_res_free(dev, &sc->vmbus_mmio_res); if (hv_fb_res) hv_fb_res = NULL; } #endif /* NEW_PCIB */ static void vmbus_identify(driver_t *driver, device_t parent) { if (device_get_unit(parent) != 0 || vm_guest != VM_GUEST_HV || (hyperv_features & CPUID_HV_MSR_SYNIC) == 0) return; device_add_child(parent, "vmbus", -1); } static int vmbus_probe(device_t dev) { if (device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV || (hyperv_features & CPUID_HV_MSR_SYNIC) == 0) return (ENXIO); device_set_desc(dev, "Hyper-V Vmbus"); return (BUS_PROBE_DEFAULT); } /** * @brief Main vmbus driver initialization routine. * * Here, we * - initialize the vmbus driver context * - setup various driver entry points * - invoke the vmbus hv main init routine * - get the irq resource * - invoke the vmbus to add the vmbus root device * - setup the vmbus root device * - retrieve the channel offers */ static int vmbus_doattach(struct vmbus_softc *sc) { struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; int ret; if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED) return (0); #ifdef NEW_PCIB vmbus_get_mmio_res(sc->vmbus_dev); vmbus_fb_mmio_res(sc->vmbus_dev); #endif sc->vmbus_flags |= VMBUS_FLAG_ATTACHED; sc->vmbus_gpadl = VMBUS_GPADL_START; mtx_init(&sc->vmbus_prichan_lock, "vmbus prichan", NULL, MTX_DEF); TAILQ_INIT(&sc->vmbus_prichans); mtx_init(&sc->vmbus_chan_lock, "vmbus channel", NULL, MTX_DEF); TAILQ_INIT(&sc->vmbus_chans); sc->vmbus_chmap = malloc( sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX, M_DEVBUF, M_WAITOK | M_ZERO); /* * Create context for "post message" Hypercalls */ sc->vmbus_xc = vmbus_xact_ctx_create(bus_get_dma_tag(sc->vmbus_dev), HYPERCALL_POSTMSGIN_SIZE, VMBUS_MSG_SIZE, sizeof(struct vmbus_msghc)); if (sc->vmbus_xc == NULL) { ret = ENXIO; goto cleanup; } /* * Allocate DMA stuffs. */ ret = vmbus_dma_alloc(sc); if (ret != 0) goto cleanup; /* * Setup interrupt. */ ret = vmbus_intr_setup(sc); if (ret != 0) goto cleanup; /* * Setup SynIC. */ if (bootverbose) device_printf(sc->vmbus_dev, "smp_started = %d\n", smp_started); smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc); sc->vmbus_flags |= VMBUS_FLAG_SYNIC; /* * Initialize vmbus, e.g. connect to Hypervisor. 
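 * vmbus_init() below tries each entry of vmbus_version[], newest
 * (WIN10) first, and settles on the first protocol revision the host
 * accepts.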
*/ ret = vmbus_init(sc); if (ret != 0) goto cleanup; if (sc->vmbus_version == VMBUS_VERSION_WS2008 || sc->vmbus_version == VMBUS_VERSION_WIN7) sc->vmbus_event_proc = vmbus_event_proc_compat; else sc->vmbus_event_proc = vmbus_event_proc; ret = vmbus_scan(sc); if (ret != 0) goto cleanup; ctx = device_get_sysctl_ctx(sc->vmbus_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vmbus_dev)); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "version", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, vmbus_sysctl_version, "A", "vmbus version"); return (ret); cleanup: vmbus_scan_teardown(sc); vmbus_intr_teardown(sc); vmbus_dma_free(sc); if (sc->vmbus_xc != NULL) { vmbus_xact_ctx_destroy(sc->vmbus_xc); sc->vmbus_xc = NULL; } free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF); mtx_destroy(&sc->vmbus_prichan_lock); mtx_destroy(&sc->vmbus_chan_lock); return (ret); } static void vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused) { } #ifdef EARLY_AP_STARTUP static void vmbus_intrhook(void *xsc) { struct vmbus_softc *sc = xsc; if (bootverbose) device_printf(sc->vmbus_dev, "intrhook\n"); vmbus_doattach(sc); config_intrhook_disestablish(&sc->vmbus_intrhook); } #endif /* EARLY_AP_STARTUP */ static int vmbus_attach(device_t dev) { vmbus_sc = device_get_softc(dev); vmbus_sc->vmbus_dev = dev; vmbus_sc->vmbus_idtvec = -1; /* * Event processing logic will be configured: * - After the vmbus protocol version negotiation. * - Before we request channel offers. */ vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy; #ifdef EARLY_AP_STARTUP /* * Defer the real attach until the pause(9) works as expected. */ vmbus_sc->vmbus_intrhook.ich_func = vmbus_intrhook; vmbus_sc->vmbus_intrhook.ich_arg = vmbus_sc; config_intrhook_establish(&vmbus_sc->vmbus_intrhook); #else /* !EARLY_AP_STARTUP */ /* * If the system has already booted and thread * scheduling is possible indicated by the global * cold set to zero, we just call the driver * initialization directly. */ if (!cold) vmbus_doattach(vmbus_sc); #endif /* EARLY_AP_STARTUP */ return (0); } static int vmbus_detach(device_t dev) { struct vmbus_softc *sc = device_get_softc(dev); bus_generic_detach(dev); vmbus_chan_destroy_all(sc); vmbus_scan_teardown(sc); vmbus_disconnect(sc); if (sc->vmbus_flags & VMBUS_FLAG_SYNIC) { sc->vmbus_flags &= ~VMBUS_FLAG_SYNIC; smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL); } vmbus_intr_teardown(sc); vmbus_dma_free(sc); if (sc->vmbus_xc != NULL) { vmbus_xact_ctx_destroy(sc->vmbus_xc); sc->vmbus_xc = NULL; } free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF); mtx_destroy(&sc->vmbus_prichan_lock); mtx_destroy(&sc->vmbus_chan_lock); #ifdef NEW_PCIB vmbus_free_mmio_res(dev); #endif return (0); } #ifndef EARLY_AP_STARTUP static void vmbus_sysinit(void *arg __unused) { struct vmbus_softc *sc = vmbus_get_softc(); if (vm_guest != VM_GUEST_HV || sc == NULL) return; /* * If the system has already booted and thread * scheduling is possible, as indicated by the * global cold set to zero, we just call the driver * initialization directly. */ if (!cold) vmbus_doattach(sc); } /* * NOTE: * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is * initialized. 
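 * (vmbus_doattach() smp_rendezvous()es vmbus_synic_setup() onto every
 * CPU, so all APs must be up and running first.)
 */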
*/ SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL); #endif /* !EARLY_AP_STARTUP */ diff --git a/sys/dev/iicbus/acpi_iicbus.c b/sys/dev/iicbus/acpi_iicbus.c index 33c0cdba4058..76920dcfc7ef 100644 --- a/sys/dev/iicbus/acpi_iicbus.c +++ b/sys/dev/iicbus/acpi_iicbus.c @@ -1,788 +1,774 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2019-2020 Vladimir Kondratyev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #define ACPI_IICBUS_LOCAL_BUFSIZE 32 /* Fits max SMBUS block size */ /* * Make a copy of ACPI_RESOURCE_I2C_SERIALBUS type and replace "pointer to ACPI * object name string" field with pointer to ACPI object itself. * This saves us extra strdup()/free() pair on acpi_iicbus_get_i2cres call. */ typedef ACPI_RESOURCE_I2C_SERIALBUS ACPI_IICBUS_RESOURCE_I2C_SERIALBUS; #define ResourceSource_Handle ResourceSource.StringPtr /* Hooks for the ACPI CA debugging infrastructure. 
*/ #define _COMPONENT ACPI_BUS ACPI_MODULE_NAME("IIC") struct gsb_buffer { UINT8 status; UINT8 len; UINT8 data[]; } __packed; struct acpi_iicbus_softc { struct iicbus_softc super_sc; ACPI_CONNECTION_INFO space_handler_info; bool space_handler_installed; }; struct acpi_iicbus_ivars { struct iicbus_ivar super_ivar; ACPI_HANDLE handle; }; static int install_space_handler = 0; TUNABLE_INT("hw.iicbus.enable_acpi_space_handler", &install_space_handler); static inline bool acpi_resource_is_i2c_serialbus(ACPI_RESOURCE *res) { return (res->Type == ACPI_RESOURCE_TYPE_SERIAL_BUS && res->Data.CommonSerialBus.Type == ACPI_RESOURCE_SERIAL_TYPE_I2C); } /* * IICBUS Address space handler */ static int acpi_iicbus_sendb(device_t dev, u_char slave, char byte) { struct iic_msg msgs[] = { { slave, IIC_M_WR, 1, &byte }, }; return (iicbus_transfer(dev, msgs, nitems(msgs))); } static int acpi_iicbus_recvb(device_t dev, u_char slave, char *byte) { char buf; struct iic_msg msgs[] = { { slave, IIC_M_RD, 1, &buf }, }; int error; error = iicbus_transfer(dev, msgs, nitems(msgs)); if (error == 0) *byte = buf; return (error); } static int acpi_iicbus_write(device_t dev, u_char slave, char cmd, void *buf, uint16_t buflen) { struct iic_msg msgs[] = { { slave, IIC_M_WR | IIC_M_NOSTOP, 1, &cmd }, { slave, IIC_M_WR | IIC_M_NOSTART, buflen, buf }, }; return (iicbus_transfer(dev, msgs, nitems(msgs))); } static int acpi_iicbus_read(device_t dev, u_char slave, char cmd, void *buf, uint16_t buflen) { uint8_t local_buffer[ACPI_IICBUS_LOCAL_BUFSIZE]; struct iic_msg msgs[] = { { slave, IIC_M_WR | IIC_M_NOSTOP, 1, &cmd }, { slave, IIC_M_RD, buflen, NULL }, }; int error; if (buflen <= sizeof(local_buffer)) msgs[1].buf = local_buffer; else msgs[1].buf = malloc(buflen, M_DEVBUF, M_WAITOK); error = iicbus_transfer(dev, msgs, nitems(msgs)); if (error == 0) memcpy(buf, msgs[1].buf, buflen); if (msgs[1].buf != local_buffer) free(msgs[1].buf, M_DEVBUF); return (error); } static int acpi_iicbus_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf) { uint8_t bytes[2] = { cmd, count }; struct iic_msg msgs[] = { { slave, IIC_M_WR | IIC_M_NOSTOP, nitems(bytes), bytes }, { slave, IIC_M_WR | IIC_M_NOSTART, count, buf }, }; if (count == 0) return (errno2iic(EINVAL)); return (iicbus_transfer(dev, msgs, nitems(msgs))); } static int acpi_iicbus_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf) { uint8_t local_buffer[ACPI_IICBUS_LOCAL_BUFSIZE]; u_char len; struct iic_msg msgs[] = { { slave, IIC_M_WR | IIC_M_NOSTOP, 1, &cmd }, { slave, IIC_M_RD | IIC_M_NOSTOP, 1, &len }, }; struct iic_msg block_msg[] = { { slave, IIC_M_RD | IIC_M_NOSTART, 0, NULL }, }; device_t parent = device_get_parent(dev); int error; /* Have to do this because the command is split in two transfers. */ error = iicbus_request_bus(parent, dev, IIC_WAIT); if (error == 0) error = iicbus_transfer(dev, msgs, nitems(msgs)); if (error == 0) { /* * If the slave offers an empty reply, * read one byte to generate the stop or abort. 
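 * A zero-length iic_msg cannot express that, so a dummy one-byte read
 * closes the transfer and the call is then failed with EBADMSG below.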
*/ if (len == 0) block_msg[0].len = 1; else block_msg[0].len = len; if (len <= sizeof(local_buffer)) block_msg[0].buf = local_buffer; else block_msg[0].buf = malloc(len, M_DEVBUF, M_WAITOK); error = iicbus_transfer(dev, block_msg, nitems(block_msg)); if (len == 0) error = errno2iic(EBADMSG); if (error == 0) { *count = len; memcpy(buf, block_msg[0].buf, len); } if (block_msg[0].buf != local_buffer) free(block_msg[0].buf, M_DEVBUF); } (void)iicbus_release_bus(parent, dev); return (error); } static ACPI_STATUS acpi_iicbus_space_handler(UINT32 Function, ACPI_PHYSICAL_ADDRESS Address, UINT32 BitWidth, UINT64 *Value, void *HandlerContext, void *RegionContext) { struct gsb_buffer *gsb; struct acpi_iicbus_softc *sc; device_t dev; ACPI_CONNECTION_INFO *info; ACPI_RESOURCE_I2C_SERIALBUS *sb; ACPI_RESOURCE *res; ACPI_STATUS s; int val; gsb = (struct gsb_buffer *)Value; if (gsb == NULL) return (AE_BAD_PARAMETER); info = HandlerContext; s = AcpiBufferToResource(info->Connection, info->Length, &res); if (ACPI_FAILURE(s)) return (s); if (!acpi_resource_is_i2c_serialbus(res)) { s = AE_BAD_PARAMETER; goto err; } sb = &res->Data.I2cSerialBus; /* XXX Ignore 10bit addressing for now */ if (sb->AccessMode == ACPI_I2C_10BIT_MODE) { s = AE_BAD_PARAMETER; goto err; } #define AML_FIELD_ATTRIB_MASK 0x0F #define AML_FIELD_ATTRIO(attr, io) (((attr) << 16) | (io)) Function &= AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_MASK, ACPI_IO_MASK); sc = __containerof(info, struct acpi_iicbus_softc, space_handler_info); dev = sc->super_sc.dev; switch (Function) { case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_SEND_RECEIVE, ACPI_READ): val = acpi_iicbus_recvb(dev, sb->SlaveAddress, gsb->data); break; case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_SEND_RECEIVE, ACPI_WRITE): val = acpi_iicbus_sendb(dev, sb->SlaveAddress, gsb->data[0]); break; case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_BYTE, ACPI_READ): val = acpi_iicbus_read(dev, sb->SlaveAddress, Address, gsb->data, 1); break; case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_BYTE, ACPI_WRITE): val = acpi_iicbus_write(dev, sb->SlaveAddress, Address, gsb->data, 1); break; case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_WORD, ACPI_READ): val = acpi_iicbus_read(dev, sb->SlaveAddress, Address, gsb->data, 2); break; case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_WORD, ACPI_WRITE): val = acpi_iicbus_write(dev, sb->SlaveAddress, Address, gsb->data, 2); break; case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_BLOCK, ACPI_READ): val = acpi_iicbus_bread(dev, sb->SlaveAddress, Address, &gsb->len, gsb->data); break; case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_BLOCK, ACPI_WRITE): val = acpi_iicbus_bwrite(dev, sb->SlaveAddress, Address, gsb->len, gsb->data); break; case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_BYTES, ACPI_READ): val = acpi_iicbus_read(dev, sb->SlaveAddress, Address, gsb->data, info->AccessLength); break; case AML_FIELD_ATTRIO(AML_FIELD_ATTRIB_BYTES, ACPI_WRITE): val = acpi_iicbus_write(dev, sb->SlaveAddress, Address, gsb->data, info->AccessLength); break; default: device_printf(dev, "protocol(0x%04x) is not supported.\n", Function); s = AE_BAD_PARAMETER; goto err; } gsb->status = val; err: ACPI_FREE(res); return (s); } static int acpi_iicbus_install_address_space_handler(struct acpi_iicbus_softc *sc) { ACPI_HANDLE handle; ACPI_STATUS s; handle = acpi_get_handle(device_get_parent(sc->super_sc.dev)); s = AcpiInstallAddressSpaceHandler(handle, ACPI_ADR_SPACE_GSBUS, &acpi_iicbus_space_handler, NULL, &sc->space_handler_info); if (ACPI_FAILURE(s)) { device_printf(sc->super_sc.dev, "Failed to install GSBUS Address Space Handler in ACPI\n"); return (ENXIO); } 
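	/*
	 * From here on, AML accesses to GenericSerialBus operation regions
	 * under this controller's handle are dispatched to
	 * acpi_iicbus_space_handler() and serviced by real iicbus transfers.
	 */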
return (0); } static int acpi_iicbus_remove_address_space_handler(struct acpi_iicbus_softc *sc) { ACPI_HANDLE handle; ACPI_STATUS s; handle = acpi_get_handle(device_get_parent(sc->super_sc.dev)); s = AcpiRemoveAddressSpaceHandler(handle, ACPI_ADR_SPACE_GSBUS, &acpi_iicbus_space_handler); if (ACPI_FAILURE(s)) { device_printf(sc->super_sc.dev, "Failed to remove GSBUS Address Space Handler from ACPI\n"); return (ENXIO); } return (0); } static ACPI_STATUS acpi_iicbus_get_i2cres_cb(ACPI_RESOURCE *res, void *context) { ACPI_IICBUS_RESOURCE_I2C_SERIALBUS *sb = context; ACPI_STATUS status; ACPI_HANDLE handle; if (acpi_resource_is_i2c_serialbus(res)) { status = AcpiGetHandle(ACPI_ROOT_OBJECT, res->Data.I2cSerialBus.ResourceSource.StringPtr, &handle); if (ACPI_FAILURE(status)) return (status); memcpy(sb, &res->Data.I2cSerialBus, sizeof(ACPI_IICBUS_RESOURCE_I2C_SERIALBUS)); /* * replace "pointer to ACPI object name string" field * with pointer to ACPI object itself. */ sb->ResourceSource_Handle = handle; return (AE_CTRL_TERMINATE); } else if (res->Type == ACPI_RESOURCE_TYPE_END_TAG) return (AE_NOT_FOUND); return (AE_OK); } static ACPI_STATUS acpi_iicbus_get_i2cres(ACPI_HANDLE handle, ACPI_RESOURCE_I2C_SERIALBUS *sb) { return (AcpiWalkResources(handle, "_CRS", acpi_iicbus_get_i2cres_cb, sb)); } static ACPI_STATUS acpi_iicbus_parse_resources_cb(ACPI_RESOURCE *res, void *context) { device_t dev = context; struct iicbus_ivar *super_devi = device_get_ivars(dev); struct resource_list *rl = &super_devi->rl; int irq, gpio_pin; switch(res->Type) { case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: if (res->Data.ExtendedIrq.InterruptCount > 0) { irq = res->Data.ExtendedIrq.Interrupts[0]; if (bootverbose) printf(" IRQ: %d\n", irq); resource_list_add_next(rl, SYS_RES_IRQ, irq, irq, 1); return (AE_CTRL_TERMINATE); } break; case ACPI_RESOURCE_TYPE_GPIO: if (res->Data.Gpio.ConnectionType == ACPI_RESOURCE_GPIO_TYPE_INT) { /* Not supported by FreeBSD yet */ gpio_pin = res->Data.Gpio.PinTable[0]; if (bootverbose) printf(" GPIO IRQ pin: %d\n", gpio_pin); return (AE_CTRL_TERMINATE); } break; default: break; } return (AE_OK); } static ACPI_STATUS acpi_iicbus_parse_resources(ACPI_HANDLE handle, device_t dev) { return (AcpiWalkResources(handle, "_CRS", acpi_iicbus_parse_resources_cb, dev)); } static void acpi_iicbus_dump_res(device_t dev, ACPI_IICBUS_RESOURCE_I2C_SERIALBUS *sb) { device_printf(dev, "found ACPI child\n"); printf(" SlaveAddress: 0x%04hx\n", sb->SlaveAddress); printf(" ConnectionSpeed: %uHz\n", sb->ConnectionSpeed); printf(" SlaveMode: %s\n", sb->SlaveMode == ACPI_CONTROLLER_INITIATED ? "ControllerInitiated" : "DeviceInitiated"); printf(" AddressingMode: %uBit\n", sb->AccessMode == 0 ? 7 : 10); printf(" ConnectionSharing: %s\n", sb->ConnectionSharing == 0 ? "Exclusive" : "Shared"); } static device_t acpi_iicbus_add_child(device_t dev, u_int order, const char *name, int unit) { return (iicbus_add_child_common( dev, order, name, unit, sizeof(struct acpi_iicbus_ivars))); } static ACPI_STATUS acpi_iicbus_enumerate_child(ACPI_HANDLE handle, UINT32 level, void *context, void **result) { device_t iicbus, child, acpi_child, acpi0; struct iicbus_softc *super_sc; ACPI_IICBUS_RESOURCE_I2C_SERIALBUS sb; ACPI_STATUS status; UINT32 sta; iicbus = context; super_sc = device_get_softc(iicbus); /* * If no _STA method or if it failed, then assume that * the device is present. 
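 * (This follows the ACPI specification, which treats a missing _STA as
 * if it had returned "present and functioning".)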
*/ if (!ACPI_FAILURE(acpi_GetInteger(handle, "_STA", &sta)) && !ACPI_DEVICE_PRESENT(sta)) return (AE_OK); if (!acpi_has_hid(handle)) return (AE_OK); /* * Read "I2C Serial Bus Connection Resource Descriptor" * described in p.19.6.57 of ACPI specification. */ bzero(&sb, sizeof(ACPI_IICBUS_RESOURCE_I2C_SERIALBUS)); if (ACPI_FAILURE(acpi_iicbus_get_i2cres(handle, &sb)) || sb.SlaveAddress == 0) return (AE_OK); if (sb.ResourceSource_Handle != acpi_get_handle(device_get_parent(iicbus))) return (AE_OK); if (bootverbose) acpi_iicbus_dump_res(iicbus, &sb); /* Find out speed of the slowest slave */ if (super_sc->bus_freq == 0 || super_sc->bus_freq > sb.ConnectionSpeed) super_sc->bus_freq = sb.ConnectionSpeed; /* Delete existing child of acpi bus */ acpi_child = acpi_get_device(handle); if (acpi_child != NULL) { acpi0 = devclass_get_device(devclass_find("acpi"), 0); if (device_get_parent(acpi_child) != acpi0) return (AE_OK); if (device_is_attached(acpi_child)) return (AE_OK); if (device_delete_child(acpi0, acpi_child) != 0) return (AE_OK); } child = BUS_ADD_CHILD(iicbus, 0, NULL, -1); if (child == NULL) { device_printf(iicbus, "add child failed\n"); return (AE_OK); } iicbus_set_addr(child, sb.SlaveAddress); acpi_set_handle(child, handle); (void)acpi_iicbus_parse_resources(handle, child); /* * Update ACPI-CA to use the IIC enumerated device_t for this handle. */ status = AcpiAttachData(handle, acpi_fake_objhandler, child); if (ACPI_FAILURE(status)) printf("WARNING: Unable to attach object data to %s - %s\n", acpi_name(handle), AcpiFormatException(status)); return (AE_OK); } static ACPI_STATUS acpi_iicbus_enumerate_children(device_t dev) { return (AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, acpi_iicbus_enumerate_child, NULL, dev, NULL)); } static void acpi_iicbus_set_power_children(device_t dev, int state, bool all_children) { device_t *devlist; int i, numdevs; if (device_get_children(dev, &devlist, &numdevs) != 0) return; for (i = 0; i < numdevs; i++) if (all_children || device_is_attached(devlist[i]) != 0) acpi_set_powerstate(devlist[i], state); free(devlist, M_TEMP); } static int acpi_iicbus_probe(device_t dev) { ACPI_HANDLE handle; device_t controller; if (acpi_disabled("iicbus")) return (ENXIO); controller = device_get_parent(dev); if (controller == NULL) return (ENXIO); handle = acpi_get_handle(controller); if (handle == NULL) return (ENXIO); device_set_desc(dev, "Philips I2C bus (ACPI-hinted)"); return (BUS_PROBE_DEFAULT); } static int acpi_iicbus_attach(device_t dev) { struct acpi_iicbus_softc *sc = device_get_softc(dev); int error; if (ACPI_FAILURE(acpi_iicbus_enumerate_children(dev))) device_printf(dev, "children enumeration failed\n"); acpi_iicbus_set_power_children(dev, ACPI_STATE_D0, true); error = iicbus_attach_common(dev, sc->super_sc.bus_freq); if (error == 0 && install_space_handler != 0 && acpi_iicbus_install_address_space_handler(sc) == 0) sc->space_handler_installed = true; return (error); } static int acpi_iicbus_detach(device_t dev) { struct acpi_iicbus_softc *sc = device_get_softc(dev); if (sc->space_handler_installed) acpi_iicbus_remove_address_space_handler(sc); acpi_iicbus_set_power_children(dev, ACPI_STATE_D3, false); return (iicbus_detach(dev)); } static int acpi_iicbus_suspend(device_t dev) { int error; error = bus_generic_suspend(dev); if (error == 0) acpi_iicbus_set_power_children(dev, ACPI_STATE_D3, false); return (error); } static int acpi_iicbus_resume(device_t dev) { acpi_iicbus_set_power_children(dev, ACPI_STATE_D0, false); return 
(bus_generic_resume(dev)); } /* * If this device is an ACPI child but no one claimed it, attempt * to power it off. We'll power it back up when a driver is added. */ static void acpi_iicbus_probe_nomatch(device_t bus, device_t child) { iicbus_probe_nomatch(bus, child); acpi_set_powerstate(child, ACPI_STATE_D3); } /* * If a new driver has a chance to probe a child, first power it up. */ static void acpi_iicbus_driver_added(device_t dev, driver_t *driver) { device_t child, *devlist; int i, numdevs; DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs) != 0) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) == DS_NOTPRESENT) { acpi_set_powerstate(child, ACPI_STATE_D0); if (device_probe_and_attach(child) != 0) acpi_set_powerstate(child, ACPI_STATE_D3); } } free(devlist, M_TEMP); } static void acpi_iicbus_child_deleted(device_t bus, device_t child) { struct acpi_iicbus_ivars *devi = device_get_ivars(child); if (acpi_get_device(devi->handle) == child) AcpiDetachData(devi->handle, acpi_fake_objhandler); } static int acpi_iicbus_read_ivar(device_t bus, device_t child, int which, uintptr_t *res) { struct acpi_iicbus_ivars *devi = device_get_ivars(child); switch (which) { case ACPI_IVAR_HANDLE: *res = (uintptr_t)devi->handle; break; default: return (iicbus_read_ivar(bus, child, which, res)); } return (0); } static int acpi_iicbus_write_ivar(device_t bus, device_t child, int which, uintptr_t val) { struct acpi_iicbus_ivars *devi = device_get_ivars(child); switch (which) { case ACPI_IVAR_HANDLE: if (devi->handle != NULL) return (EINVAL); devi->handle = (ACPI_HANDLE)val; break; default: return (iicbus_write_ivar(bus, child, which, val)); } return (0); } /* Location hint for devctl(8). Concatenate IIC and ACPI hints. */ static int -acpi_iicbus_child_location_str(device_t bus, device_t child, - char *buf, size_t buflen) +acpi_iicbus_child_location(device_t bus, device_t child, struct sbuf *sb) { struct acpi_iicbus_ivars *devi = device_get_ivars(child); int error; /* read IIC location hint string into the buffer. */ - error = iicbus_child_location_str(bus, child, buf, buflen); + error = iicbus_child_location(bus, child, sb); if (error != 0) return (error); /* Place ACPI string right after IIC one's terminating NUL. */ - if (devi->handle != NULL && - ((buf[0] != '\0' && strlcat(buf, " ", buflen) >= buflen) || - strlcat(buf, "handle=", buflen) >= buflen || - strlcat(buf, acpi_name(devi->handle), buflen) >= buflen)) - return (EOVERFLOW); + if (devi->handle != NULL) + sbuf_printf(sb, " handle=%s", acpi_name(devi->handle)); return (0); } /* PnP information for devctl(8). Concatenate IIC and ACPI info strings. */ static int -acpi_iicbus_child_pnpinfo_str(device_t bus, device_t child, char *buf, - size_t buflen) +acpi_iicbus_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb) { struct acpi_iicbus_ivars *devi = device_get_ivars(child); - size_t acpi_offset; int error; /* read IIC PnP string into the buffer. */ - error = iicbus_child_pnpinfo_str(bus, child, buf, buflen); + error = iicbus_child_pnpinfo(bus, child, sb); if (error != 0) return (error); if (devi->handle == NULL) return (0); - /* Place ACPI string right after IIC one's terminating NUL. */ - acpi_offset = strlen(buf); - if (acpi_offset != 0) - acpi_offset++; - error = acpi_pnpinfo_str(devi->handle, buf + acpi_offset, - buflen - acpi_offset); - - /* Coalesce both strings if they are not empty. 
*/ - if (acpi_offset > 0 && acpi_offset < buflen && buf[acpi_offset] != 0) - buf[acpi_offset - 1] = ' '; + error = acpi_pnpinfo(devi->handle, sb); return (error); } static device_method_t acpi_iicbus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, acpi_iicbus_probe), DEVMETHOD(device_attach, acpi_iicbus_attach), DEVMETHOD(device_detach, acpi_iicbus_detach), DEVMETHOD(device_suspend, acpi_iicbus_suspend), DEVMETHOD(device_resume, acpi_iicbus_resume), /* Bus interface */ DEVMETHOD(bus_add_child, acpi_iicbus_add_child), DEVMETHOD(bus_probe_nomatch, acpi_iicbus_probe_nomatch), DEVMETHOD(bus_driver_added, acpi_iicbus_driver_added), DEVMETHOD(bus_child_deleted, acpi_iicbus_child_deleted), DEVMETHOD(bus_read_ivar, acpi_iicbus_read_ivar), DEVMETHOD(bus_write_ivar, acpi_iicbus_write_ivar), - DEVMETHOD(bus_child_location_str,acpi_iicbus_child_location_str), - DEVMETHOD(bus_child_pnpinfo_str,acpi_iicbus_child_pnpinfo_str), + DEVMETHOD(bus_child_location, acpi_iicbus_child_location), + DEVMETHOD(bus_child_pnpinfo, acpi_iicbus_child_pnpinfo), DEVMETHOD_END, }; DEFINE_CLASS_1(iicbus, acpi_iicbus_driver, acpi_iicbus_methods, sizeof(struct acpi_iicbus_softc), iicbus_driver); MODULE_VERSION(acpi_iicbus, 1); MODULE_DEPEND(acpi_iicbus, acpi, 1, 1, 1); diff --git a/sys/dev/iicbus/iicbus.c b/sys/dev/iicbus/iicbus.c index b16a2bdba332..dd6b484e395b 100644 --- a/sys/dev/iicbus/iicbus.c +++ b/sys/dev/iicbus/iicbus.c @@ -1,391 +1,389 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1998, 2001 Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Autoconfiguration and support routines for the Philips serial I2C bus */ #include #include #include +#include #include #include #include #include #include +#include #include -#include #include #include #include "iicbus_if.h" /* See comments below for why auto-scanning is a bad idea. */ #define SCAN_IICBUS 0 SYSCTL_NODE(_hw, OID_AUTO, i2c, CTLFLAG_RW, 0, "i2c controls"); static int iicbus_probe(device_t dev) { device_set_desc(dev, "Philips I2C bus"); /* Allow other subclasses to override this driver. */ return (BUS_PROBE_GENERIC); } #if SCAN_IICBUS static int iic_probe_device(device_t dev, u_char addr) { int count; char byte; if ((addr & 1) == 0) { /* is device writable? 
*/ if (!iicbus_start(dev, (u_char)addr, 0)) { iicbus_stop(dev); return (1); } } else { /* is device readable? */ if (!iicbus_block_read(dev, (u_char)addr, &byte, 1, &count)) return (1); } return (0); } #endif /* * We add all the devices which we know about. * The generic attach routine will attach them if they are alive. */ int iicbus_attach_common(device_t dev, u_int bus_freq) { #if SCAN_IICBUS unsigned char addr; #endif struct iicbus_softc *sc = IICBUS_SOFTC(dev); int strict; sc->dev = dev; mtx_init(&sc->lock, "iicbus", NULL, MTX_DEF); iicbus_init_frequency(dev, bus_freq); iicbus_reset(dev, IIC_FASTEST, 0, NULL); if (resource_int_value(device_get_name(dev), device_get_unit(dev), "strict", &strict) == 0) sc->strict = strict; else sc->strict = 1; /* device probing is meaningless since the bus is supposed to be * hot-pluggable. Moreover, some I2C chips do not appreciate random * accesses like stop after start too fast, reads for less than * x bytes... */ #if SCAN_IICBUS printf("Probing for devices on iicbus%d:", device_get_unit(dev)); /* probe any devices */ for (addr = 16; addr < 240; addr++) { if (iic_probe_device(dev, (u_char)addr)) { printf(" <%x>", addr); } } printf("\n"); #endif bus_generic_probe(dev); bus_enumerate_hinted_children(dev); bus_generic_attach(dev); return (0); } static int iicbus_attach(device_t dev) { return (iicbus_attach_common(dev, 0)); } int iicbus_detach(device_t dev) { struct iicbus_softc *sc = IICBUS_SOFTC(dev); int err; if ((err = device_delete_children(dev)) != 0) return (err); iicbus_reset(dev, IIC_FASTEST, 0, NULL); mtx_destroy(&sc->lock); return (0); } static int iicbus_print_child(device_t dev, device_t child) { struct iicbus_ivar *devi = IICBUS_IVAR(child); int retval = 0; retval += bus_print_child_header(dev, child); if (devi->addr != 0) retval += printf(" at addr %#x", devi->addr); resource_list_print_type(&devi->rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } void iicbus_probe_nomatch(device_t bus, device_t child) { struct iicbus_ivar *devi = IICBUS_IVAR(child); device_printf(bus, "<unknown card> at addr %#x\n", devi->addr); } int -iicbus_child_location_str(device_t bus, device_t child, char *buf, - size_t buflen) +iicbus_child_location(device_t bus, device_t child, struct sbuf *sb) { struct iicbus_ivar *devi = IICBUS_IVAR(child); - snprintf(buf, buflen, "addr=%#x", devi->addr); + sbuf_printf(sb, "addr=%#x", devi->addr); return (0); } int -iicbus_child_pnpinfo_str(device_t bus, device_t child, char *buf, - size_t buflen) +iicbus_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb) { - *buf = '\0'; return (0); } int iicbus_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct iicbus_ivar *devi = IICBUS_IVAR(child); switch (which) { default: return (EINVAL); case IICBUS_IVAR_ADDR: *result = devi->addr; break; } return (0); } int iicbus_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct iicbus_ivar *devi = IICBUS_IVAR(child); switch (which) { default: return (EINVAL); case IICBUS_IVAR_ADDR: if (devi->addr != 0) return (EINVAL); devi->addr = value; } return (0); } device_t iicbus_add_child_common(device_t dev, u_int order, const char *name, int unit, size_t ivars_size) { device_t child; struct iicbus_ivar *devi; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); devi = malloc(ivars_size, M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (0); } resource_list_init(&devi->rl);
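[Editor's note: a minimal sketch of the new-style method for a hypothetical bus driver (all names here are illustrative, not part of this commit):

	static int
	mybus_child_location(device_t bus, device_t child, struct sbuf *sb)
	{
		struct mybus_ivar *ivar = device_get_ivars(child);

		sbuf_printf(sb, "slot=%d", ivar->slot);
		return (0);
	}

Unlike the old char buf/buflen contract, nothing needs to track remaining space or NUL-terminate; note also that iicbus_child_pnpinfo() above no longer has to store an empty string, since an untouched sbuf simply stays empty.]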
device_set_ivars(child, devi); return (child); } static device_t iicbus_add_child(device_t dev, u_int order, const char *name, int unit) { return (iicbus_add_child_common( dev, order, name, unit, sizeof(struct iicbus_ivar))); } static void iicbus_hinted_child(device_t bus, const char *dname, int dunit) { device_t child; int irq; struct iicbus_ivar *devi; child = BUS_ADD_CHILD(bus, 0, dname, dunit); devi = IICBUS_IVAR(child); resource_int_value(dname, dunit, "addr", &devi->addr); if (resource_int_value(dname, dunit, "irq", &irq) == 0) { if (bus_set_resource(child, SYS_RES_IRQ, 0, irq, 1) != 0) device_printf(bus, "warning: bus_set_resource() failed\n"); } } static struct resource_list * iicbus_get_resource_list(device_t bus __unused, device_t child) { struct iicbus_ivar *devi; devi = IICBUS_IVAR(child); return (&devi->rl); } int iicbus_generic_intr(device_t dev, int event, char *buf) { return (0); } int iicbus_null_callback(device_t dev, int index, caddr_t data) { return (0); } int iicbus_null_repeated_start(device_t dev, u_char addr) { return (IIC_ENOTSUPP); } void iicbus_init_frequency(device_t dev, u_int bus_freq) { struct iicbus_softc *sc = IICBUS_SOFTC(dev); /* * If a bus frequency value was passed in, use it. Otherwise initialize * it first to the standard i2c 100KHz frequency, then override that * from a hint if one exists. */ if (bus_freq > 0) sc->bus_freq = bus_freq; else { sc->bus_freq = 100000; resource_int_value(device_get_name(dev), device_get_unit(dev), "frequency", (int *)&sc->bus_freq); } /* * Set up the sysctl that allows the bus frequency to be changed. * It is flagged as a tunable so that the user can set the value in * loader(8), and that will override any other setting from any source. * The sysctl tunable/value is the one most directly controlled by the * user and thus the one that always takes precedence. */ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "frequency", CTLFLAG_RW | CTLFLAG_TUN, &sc->bus_freq, sc->bus_freq, "Bus frequency in Hz"); } static u_int iicbus_get_frequency(device_t dev, u_char speed) { struct iicbus_softc *sc = IICBUS_SOFTC(dev); /* * If the frequency has not been configured for the bus, or the request * is specifically for SLOW speed, use the standard 100KHz rate, else * use the configured bus speed. 
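[Editor's note: an illustrative configuration for the hints and the tunable handled above, assuming bus unit 0 and a hypothetical child driver named "foo" (the key names match the resource_int_value() lookups in iicbus_hinted_child() and iicbus_init_frequency()):

	# /boot/device.hints
	hint.foo.0.at="iicbus0"
	hint.foo.0.addr="0x40"
	hint.foo.0.irq="16"

	# /boot/loader.conf: backs the dev.iicbus.0.frequency sysctl
	dev.iicbus.0.frequency="400000"
]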
*/ if (sc->bus_freq == 0 || speed == IIC_SLOW) return (100000); return (sc->bus_freq); } static device_method_t iicbus_methods[] = { /* device interface */ DEVMETHOD(device_probe, iicbus_probe), DEVMETHOD(device_attach, iicbus_attach), DEVMETHOD(device_detach, iicbus_detach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* bus interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource_list, iicbus_get_resource_list), DEVMETHOD(bus_add_child, iicbus_add_child), DEVMETHOD(bus_print_child, iicbus_print_child), DEVMETHOD(bus_probe_nomatch, iicbus_probe_nomatch), DEVMETHOD(bus_read_ivar, iicbus_read_ivar), DEVMETHOD(bus_write_ivar, iicbus_write_ivar), - DEVMETHOD(bus_child_pnpinfo_str, iicbus_child_pnpinfo_str), - DEVMETHOD(bus_child_location_str, iicbus_child_location_str), + DEVMETHOD(bus_child_pnpinfo, iicbus_child_pnpinfo), + DEVMETHOD(bus_child_location, iicbus_child_location), DEVMETHOD(bus_hinted_child, iicbus_hinted_child), /* iicbus interface */ DEVMETHOD(iicbus_transfer, iicbus_transfer), DEVMETHOD(iicbus_get_frequency, iicbus_get_frequency), DEVMETHOD_END }; driver_t iicbus_driver = { "iicbus", iicbus_methods, sizeof(struct iicbus_softc), }; devclass_t iicbus_devclass; MODULE_VERSION(iicbus, IICBUS_MODVER); DRIVER_MODULE(iicbus, iichb, iicbus_driver, iicbus_devclass, 0, 0); diff --git a/sys/dev/iicbus/iicbus.h b/sys/dev/iicbus/iicbus.h index d3c3f77d5455..ec4f841395d5 100644 --- a/sys/dev/iicbus/iicbus.h +++ b/sys/dev/iicbus/iicbus.h @@ -1,109 +1,107 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1998 Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ * */ #ifndef __IICBUS_H #define __IICBUS_H #include #include #define IICBUS_IVAR(d) (struct iicbus_ivar *) device_get_ivars(d) #define IICBUS_SOFTC(d) (struct iicbus_softc *) device_get_softc(d) struct iicbus_softc { device_t dev; /* Myself */ device_t owner; /* iicbus owner device structure */ device_t busydev; /* iicbus_release_bus calls unbusy on this */ u_int owncount; /* iicbus ownership nesting count */ u_char started; /* address of the 'started' slave * 0 if no start condition succeeded */ u_char strict; /* deny operations that violate the * I2C protocol */ struct mtx lock; u_int bus_freq; /* Configured bus Hz. */ }; struct iicbus_ivar { uint32_t addr; struct resource_list rl; }; /* Value of 0x100 is reserved for ACPI_IVAR_HANDLE used by acpi_iicbus */ enum { IICBUS_IVAR_ADDR /* Address or base address */ }; #define IICBUS_ACCESSOR(A, B, T) \ __BUS_ACCESSOR(iicbus, A, IICBUS, B, T) IICBUS_ACCESSOR(addr, ADDR, uint32_t) #define IICBUS_LOCK(sc) mtx_lock(&(sc)->lock) #define IICBUS_UNLOCK(sc) mtx_unlock(&(sc)->lock) #define IICBUS_ASSERT_LOCKED(sc) mtx_assert(&(sc)->lock, MA_OWNED) #ifdef FDT #define IICBUS_FDT_PNP_INFO(t) FDTCOMPAT_PNP_INFO(t, iicbus) #else #define IICBUS_FDT_PNP_INFO(t) #endif #ifdef DEV_ACPI #define IICBUS_ACPI_PNP_INFO(t) ACPICOMPAT_PNP_INFO(t, iicbus) #else #define IICBUS_ACPI_PNP_INFO(t) #endif int iicbus_generic_intr(device_t dev, int event, char *buf); void iicbus_init_frequency(device_t dev, u_int bus_freq); int iicbus_attach_common(device_t dev, u_int bus_freq); device_t iicbus_add_child_common(device_t dev, u_int order, const char *name, int unit, size_t ivars_size); int iicbus_detach(device_t dev); void iicbus_probe_nomatch(device_t bus, device_t child); int iicbus_read_ivar(device_t bus, device_t child, int which, uintptr_t *result); int iicbus_write_ivar(device_t bus, device_t child, int which, uintptr_t value); -int iicbus_child_location_str(device_t bus, device_t child, char *buf, - size_t buflen); -int iicbus_child_pnpinfo_str(device_t bus, device_t child, char *buf, - size_t buflen); +int iicbus_child_location(device_t bus, device_t child, struct sbuf *sb); +int iicbus_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb); extern driver_t iicbus_driver; extern devclass_t iicbus_devclass; extern driver_t ofw_iicbus_driver; extern devclass_t ofw_iicbus_devclass; extern driver_t acpi_iicbus_driver; #endif diff --git a/sys/dev/iicbus/ofw_iicbus.c b/sys/dev/iicbus/ofw_iicbus.c index ffdb87fa419d..9be05d73abde 100644 --- a/sys/dev/iicbus/ofw_iicbus.c +++ b/sys/dev/iicbus/ofw_iicbus.c @@ -1,240 +1,240 @@ /*- * Copyright (c) 2009, Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" /* Methods */ static device_probe_t ofw_iicbus_probe; static device_attach_t ofw_iicbus_attach; static device_t ofw_iicbus_add_child(device_t dev, u_int order, const char *name, int unit); static const struct ofw_bus_devinfo *ofw_iicbus_get_devinfo(device_t bus, device_t dev); static device_method_t ofw_iicbus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofw_iicbus_probe), DEVMETHOD(device_attach, ofw_iicbus_attach), /* Bus interface */ - DEVMETHOD(bus_child_pnpinfo_str, ofw_bus_gen_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_add_child, ofw_iicbus_add_child), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, ofw_iicbus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; struct ofw_iicbus_devinfo { struct iicbus_ivar opd_dinfo; /* Must be the first. */ struct ofw_bus_devinfo opd_obdinfo; }; devclass_t ofw_iicbus_devclass; DEFINE_CLASS_1(iicbus, ofw_iicbus_driver, ofw_iicbus_methods, sizeof(struct iicbus_softc), iicbus_driver); EARLY_DRIVER_MODULE(ofw_iicbus, iicbb, ofw_iicbus_driver, ofw_iicbus_devclass, 0, 0, BUS_PASS_BUS); EARLY_DRIVER_MODULE(ofw_iicbus, iichb, ofw_iicbus_driver, ofw_iicbus_devclass, 0, 0, BUS_PASS_BUS); EARLY_DRIVER_MODULE(ofw_iicbus, twsi, ofw_iicbus_driver, ofw_iicbus_devclass, 0, 0, BUS_PASS_BUS); MODULE_VERSION(ofw_iicbus, 1); MODULE_DEPEND(ofw_iicbus, iicbus, 1, 1, 1); static int ofw_iicbus_probe(device_t dev) { if (ofw_bus_get_node(dev) == -1) return (ENXIO); device_set_desc(dev, "OFW I2C bus"); return (0); } static int ofw_iicbus_attach(device_t dev) { struct iicbus_softc *sc = IICBUS_SOFTC(dev); struct ofw_iicbus_devinfo *dinfo; phandle_t child, node, root; pcell_t freq, paddr; device_t childdev; ssize_t compatlen; char compat[255]; char *curstr; u_int iic_addr_8bit = 0; sc->dev = dev; mtx_init(&sc->lock, "iicbus", NULL, MTX_DEF); /* * If there is a clock-frequency property for the device node, use it as * the starting value for the bus frequency. Then call the common * routine that handles the tunable/sysctl which allows the FDT value to * be overridden by the user. */ node = ofw_bus_get_node(dev); freq = 0; OF_getencprop(node, "clock-frequency", &freq, sizeof(freq)); iicbus_init_frequency(dev, freq); iicbus_reset(dev, IIC_FASTEST, 0, NULL); bus_generic_probe(dev); bus_enumerate_hinted_children(dev); /* * Check if we're running on a PowerMac, needed for the I2C * address below. 
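[Editor's note: OF_getprop() returns "compatible" as a packed list of NUL-terminated strings, e.g. the two-entry list

	"MacRISC\0MacRISC2\0"

which is why the loop below advances with strlen(curstr) + 1 rather than splitting on a separator character.]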
*/ root = OF_peer(0); compatlen = OF_getprop(root, "compatible", compat, sizeof(compat)); if (compatlen != -1) { for (curstr = compat; curstr < compat + compatlen; curstr += strlen(curstr) + 1) { if (strncmp(curstr, "MacRISC", 7) == 0) iic_addr_8bit = 1; } } /* * Attach those children represented in the device tree. */ for (child = OF_child(node); child != 0; child = OF_peer(child)) { /* * Try to get the I2C address first from the i2c-address * property, then try the reg property. It moves around * on different systems. */ if (OF_getencprop(child, "i2c-address", &paddr, sizeof(paddr)) == -1) if (OF_getencprop(child, "reg", &paddr, sizeof(paddr)) == -1) continue; /* * Now set up the I2C and OFW bus layer devinfo and add it * to the bus. */ dinfo = malloc(sizeof(struct ofw_iicbus_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (dinfo == NULL) continue; /* * FreeBSD drivers expect I2C addresses to be expressed as * 8-bit values. Apple OFW data contains 8-bit values, but * Linux FDT data contains 7-bit values, so shift them up to * 8-bit format. */ if (iic_addr_8bit) dinfo->opd_dinfo.addr = paddr; else dinfo->opd_dinfo.addr = paddr << 1; if (ofw_bus_gen_setup_devinfo(&dinfo->opd_obdinfo, child) != 0) { free(dinfo, M_DEVBUF); continue; } childdev = device_add_child(dev, NULL, -1); resource_list_init(&dinfo->opd_dinfo.rl); ofw_bus_intr_to_rl(childdev, child, &dinfo->opd_dinfo.rl, NULL); device_set_ivars(childdev, dinfo); } /* Register bus */ OF_device_register_xref(OF_xref_from_node(node), dev); return (bus_generic_attach(dev)); } static device_t ofw_iicbus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct ofw_iicbus_devinfo *devi; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); devi = malloc(sizeof(struct ofw_iicbus_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (0); } /* * NULL all the OFW-related parts of the ivars for non-OFW * children. */ devi->opd_obdinfo.obd_node = -1; devi->opd_obdinfo.obd_name = NULL; devi->opd_obdinfo.obd_compat = NULL; devi->opd_obdinfo.obd_type = NULL; devi->opd_obdinfo.obd_model = NULL; device_set_ivars(child, devi); return (child); } static const struct ofw_bus_devinfo * ofw_iicbus_get_devinfo(device_t bus, device_t dev) { struct ofw_iicbus_devinfo *dinfo; dinfo = device_get_ivars(dev); return (&dinfo->opd_obdinfo); } diff --git a/sys/dev/mii/mii.c b/sys/dev/mii/mii.c index 57294f4c096c..5163a7456ed0 100644 --- a/sys/dev/mii/mii.c +++ b/sys/dev/mii/mii.c @@ -1,681 +1,680 @@ /* $NetBSD: mii.c,v 1.12 1999/08/03 19:41:49 drochner Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause-NetBSD * * Copyright (c) 1998 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, * NASA Ames Research Center. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. 
AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * MII bus layer, glues MII-capable network interface drivers to sharable * PHY drivers. This exports an interface compatible with BSD/OS 3.0's, * plus some NetBSD extensions. */ #include #include #include #include #include #include +#include #include #include #include #include #include MODULE_VERSION(miibus, 1); #include "miibus_if.h" static device_attach_t miibus_attach; static bus_child_detached_t miibus_child_detached; -static bus_child_location_str_t miibus_child_location_str; -static bus_child_pnpinfo_str_t miibus_child_pnpinfo_str; +static bus_child_location_t miibus_child_location; +static bus_child_pnpinfo_t miibus_child_pnpinfo; static device_detach_t miibus_detach; static bus_hinted_child_t miibus_hinted_child; static bus_print_child_t miibus_print_child; static device_probe_t miibus_probe; static bus_read_ivar_t miibus_read_ivar; static miibus_readreg_t miibus_readreg; static miibus_statchg_t miibus_statchg; static miibus_writereg_t miibus_writereg; static miibus_linkchg_t miibus_linkchg; static miibus_mediainit_t miibus_mediainit; static unsigned char mii_bitreverse(unsigned char x); static device_method_t miibus_methods[] = { /* device interface */ DEVMETHOD(device_probe, miibus_probe), DEVMETHOD(device_attach, miibus_attach), DEVMETHOD(device_detach, miibus_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, miibus_print_child), DEVMETHOD(bus_read_ivar, miibus_read_ivar), DEVMETHOD(bus_child_detached, miibus_child_detached), - DEVMETHOD(bus_child_pnpinfo_str, miibus_child_pnpinfo_str), - DEVMETHOD(bus_child_location_str, miibus_child_location_str), + DEVMETHOD(bus_child_pnpinfo, miibus_child_pnpinfo), + DEVMETHOD(bus_child_location, miibus_child_location), DEVMETHOD(bus_hinted_child, miibus_hinted_child), /* MII interface */ DEVMETHOD(miibus_readreg, miibus_readreg), DEVMETHOD(miibus_writereg, miibus_writereg), DEVMETHOD(miibus_statchg, miibus_statchg), DEVMETHOD(miibus_linkchg, miibus_linkchg), DEVMETHOD(miibus_mediainit, miibus_mediainit), DEVMETHOD_END }; devclass_t miibus_devclass; driver_t miibus_driver = { "miibus", miibus_methods, sizeof(struct mii_data) }; struct miibus_ivars { if_t ifp; ifm_change_cb_t ifmedia_upd; ifm_stat_cb_t ifmedia_sts; u_int mii_flags; u_int mii_offset; }; static int miibus_probe(device_t dev) { device_set_desc(dev, "MII bus"); return (BUS_PROBE_SPECIFIC); } static int miibus_attach(device_t dev) { struct miibus_ivars *ivars; struct mii_attach_args *ma; struct mii_data *mii; device_t *children; int i, nchildren; mii = device_get_softc(dev); if (device_get_children(dev, &children, &nchildren) == 0) { for (i = 0; i < nchildren; i++) { ma = device_get_ivars(children[i]); ma->mii_data = mii; } free(children, M_TEMP); } if (nchildren == 0) { 
device_printf(dev, "cannot get children\n"); return (ENXIO); } ivars = device_get_ivars(dev); ifmedia_init(&mii->mii_media, IFM_IMASK, ivars->ifmedia_upd, ivars->ifmedia_sts); mii->mii_ifp = ivars->ifp; if_setcapabilitiesbit(mii->mii_ifp, IFCAP_LINKSTATE, 0); if_setcapenablebit(mii->mii_ifp, IFCAP_LINKSTATE, 0); LIST_INIT(&mii->mii_phys); return (bus_generic_attach(dev)); } static int miibus_detach(device_t dev) { struct mii_data *mii; struct miibus_ivars *ivars; ivars = device_get_ivars(dev); bus_generic_detach(dev); mii = device_get_softc(dev); ifmedia_removeall(&mii->mii_media); free(ivars, M_DEVBUF); mii->mii_ifp = NULL; return (0); } static void miibus_child_detached(device_t dev, device_t child) { struct mii_attach_args *args; args = device_get_ivars(child); free(args, M_DEVBUF); } static int miibus_print_child(device_t dev, device_t child) { struct mii_attach_args *ma; int retval; ma = device_get_ivars(child); retval = bus_print_child_header(dev, child); retval += printf(" PHY %d", ma->mii_phyno); retval += bus_print_child_footer(dev, child); return (retval); } static int miibus_read_ivar(device_t dev, device_t child __unused, int which, uintptr_t *result) { struct miibus_ivars *ivars; /* * NB: this uses the instance variables of the miibus rather than * its PHY children. */ ivars = device_get_ivars(dev); switch (which) { case MIIBUS_IVAR_FLAGS: *result = ivars->mii_flags; break; default: return (ENOENT); } return (0); } static int -miibus_child_pnpinfo_str(device_t dev __unused, device_t child, char *buf, - size_t buflen) +miibus_child_pnpinfo(device_t dev __unused, device_t child, struct sbuf *sb) { struct mii_attach_args *ma; ma = device_get_ivars(child); - snprintf(buf, buflen, "oui=0x%x model=0x%x rev=0x%x", + sbuf_printf(sb, "oui=0x%x model=0x%x rev=0x%x", MII_OUI(ma->mii_id1, ma->mii_id2), MII_MODEL(ma->mii_id2), MII_REV(ma->mii_id2)); return (0); } static int -miibus_child_location_str(device_t dev __unused, device_t child, char *buf, - size_t buflen) +miibus_child_location(device_t dev __unused, device_t child, struct sbuf *sb) { struct mii_attach_args *ma; ma = device_get_ivars(child); - snprintf(buf, buflen, "phyno=%d", ma->mii_phyno); + sbuf_printf(sb, "phyno=%d", ma->mii_phyno); return (0); } static void miibus_hinted_child(device_t dev, const char *name, int unit) { struct miibus_ivars *ivars; struct mii_attach_args *args, *ma; device_t *children, phy; int i, nchildren; u_int val; if (resource_int_value(name, unit, "phyno", &val) != 0) return; if (device_get_children(dev, &children, &nchildren) != 0) return; ma = NULL; for (i = 0; i < nchildren; i++) { args = device_get_ivars(children[i]); if (args->mii_phyno == val) { ma = args; break; } } free(children, M_TEMP); /* * Don't add a PHY that was automatically identified by having media * in its BMSR twice, only allow to alter its attach arguments. 
*/ if (ma == NULL) { ma = malloc(sizeof(struct mii_attach_args), M_DEVBUF, M_NOWAIT); if (ma == NULL) return; phy = device_add_child(dev, name, unit); if (phy == NULL) { free(ma, M_DEVBUF); return; } ivars = device_get_ivars(dev); ma->mii_phyno = val; ma->mii_offset = ivars->mii_offset++; ma->mii_id1 = 0; ma->mii_id2 = 0; ma->mii_capmask = BMSR_DEFCAPMASK; device_set_ivars(phy, ma); } if (resource_int_value(name, unit, "id1", &val) == 0) ma->mii_id1 = val; if (resource_int_value(name, unit, "id2", &val) == 0) ma->mii_id2 = val; if (resource_int_value(name, unit, "capmask", &val) == 0) ma->mii_capmask = val; } static int miibus_readreg(device_t dev, int phy, int reg) { device_t parent; parent = device_get_parent(dev); return (MIIBUS_READREG(parent, phy, reg)); } static int miibus_writereg(device_t dev, int phy, int reg, int data) { device_t parent; parent = device_get_parent(dev); return (MIIBUS_WRITEREG(parent, phy, reg, data)); } static void miibus_statchg(device_t dev) { device_t parent; struct mii_data *mii; parent = device_get_parent(dev); MIIBUS_STATCHG(parent); mii = device_get_softc(dev); if_setbaudrate(mii->mii_ifp, ifmedia_baudrate(mii->mii_media_active)); } static void miibus_linkchg(device_t dev) { struct mii_data *mii; device_t parent; int link_state; parent = device_get_parent(dev); MIIBUS_LINKCHG(parent); mii = device_get_softc(dev); if (mii->mii_media_status & IFM_AVALID) { if (mii->mii_media_status & IFM_ACTIVE) link_state = LINK_STATE_UP; else link_state = LINK_STATE_DOWN; } else link_state = LINK_STATE_UNKNOWN; if_link_state_change(mii->mii_ifp, link_state); } static void miibus_mediainit(device_t dev) { struct mii_data *mii; struct ifmedia_entry *m; int media = 0; /* Poke the parent in case it has any media of its own to add. */ MIIBUS_MEDIAINIT(device_get_parent(dev)); mii = device_get_softc(dev); LIST_FOREACH(m, &mii->mii_media.ifm_list, ifm_list) { media = m->ifm_media; if (media == (IFM_ETHER | IFM_AUTO)) break; } ifmedia_set(&mii->mii_media, media); } /* * Helper function used by network interface drivers, attaches the miibus and * the PHYs to the network interface driver parent. 
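[Editor's note: a typical call from a NIC driver's attach routine, with hypothetical softc and callback names (error handling abbreviated):

	error = mii_attach(dev, &sc->miibus, ifp, foo_ifmedia_upd,
	    foo_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (error != 0)
		device_printf(dev, "attaching PHYs failed\n");
]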
*/ int mii_attach(device_t dev, device_t *miibus, if_t ifp, ifm_change_cb_t ifmedia_upd, ifm_stat_cb_t ifmedia_sts, int capmask, int phyloc, int offloc, int flags) { struct miibus_ivars *ivars; struct mii_attach_args *args, ma; device_t *children, phy; int bmsr, first, i, nchildren, phymax, phymin, rv; uint32_t phymask; if (phyloc != MII_PHY_ANY && offloc != MII_OFFSET_ANY) { printf("%s: phyloc and offloc specified\n", __func__); return (EINVAL); } if (offloc != MII_OFFSET_ANY && (offloc < 0 || offloc >= MII_NPHY)) { printf("%s: invalid offloc %d\n", __func__, offloc); return (EINVAL); } if (phyloc == MII_PHY_ANY) { phymin = 0; phymax = MII_NPHY - 1; } else { if (phyloc < 0 || phyloc >= MII_NPHY) { printf("%s: invalid phyloc %d\n", __func__, phyloc); return (EINVAL); } phymin = phymax = phyloc; } first = 0; if (*miibus == NULL) { first = 1; ivars = malloc(sizeof(*ivars), M_DEVBUF, M_NOWAIT); if (ivars == NULL) return (ENOMEM); ivars->ifp = ifp; ivars->ifmedia_upd = ifmedia_upd; ivars->ifmedia_sts = ifmedia_sts; ivars->mii_flags = flags; *miibus = device_add_child(dev, "miibus", -1); if (*miibus == NULL) { rv = ENXIO; goto fail; } device_set_ivars(*miibus, ivars); } else { ivars = device_get_ivars(*miibus); if (ivars->ifp != ifp || ivars->ifmedia_upd != ifmedia_upd || ivars->ifmedia_sts != ifmedia_sts || ivars->mii_flags != flags) { printf("%s: non-matching invariant\n", __func__); return (EINVAL); } /* * Assignment of the attach arguments mii_data for the first * pass is done in miibus_attach(), i.e. once the miibus softc * has been allocated. */ ma.mii_data = device_get_softc(*miibus); } ma.mii_capmask = capmask; if (resource_int_value(device_get_name(*miibus), device_get_unit(*miibus), "phymask", &phymask) != 0) phymask = 0xffffffff; if (device_get_children(*miibus, &children, &nchildren) != 0) { children = NULL; nchildren = 0; } ivars->mii_offset = 0; for (ma.mii_phyno = phymin; ma.mii_phyno <= phymax; ma.mii_phyno++) { /* * Make sure we haven't already configured a PHY at this * address. This allows mii_attach() to be called * multiple times. */ for (i = 0; i < nchildren; i++) { args = device_get_ivars(children[i]); if (args->mii_phyno == ma.mii_phyno) { /* * Yes, there is already something * configured at this address. */ goto skip; } } /* * Check to see if there is a PHY at this address. Note, * many braindead PHYs report 0/0 in their ID registers, * so we test for media in the BMSR. */ bmsr = MIIBUS_READREG(dev, ma.mii_phyno, MII_BMSR); if (bmsr == 0 || bmsr == 0xffff || (bmsr & (BMSR_EXTSTAT | BMSR_MEDIAMASK)) == 0) { /* Assume no PHY at this address. */ continue; } /* * There is a PHY at this address. If we were given an * `offset' locator, skip this PHY if it doesn't match. */ if (offloc != MII_OFFSET_ANY && offloc != ivars->mii_offset) goto skip; /* * Skip this PHY if it's not included in the phymask hint. */ if ((phymask & (1 << ma.mii_phyno)) == 0) goto skip; /* * Extract the IDs. Braindead PHYs will be handled by * the `ukphy' driver, as we have no ID information to * match on. 
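[Editor's note: the two ID registers pack the vendor OUI, model, and revision; treating id1:id2 as one 32-bit word, MII_OUI() extracts bits 31-10, MII_MODEL() bits 9-4, and MII_REV() bits 3-0, i.e.:

	oui   = (id1 << 6) | (id2 >> 10);
	model = (id2 >> 4) & 0x3f;
	rev   = id2 & 0x0f;

These are the values miibus_child_pnpinfo() reports, and what devmatch(8) later matches PHY driver PNP tables against.]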
*/ ma.mii_id1 = MIIBUS_READREG(dev, ma.mii_phyno, MII_PHYIDR1); ma.mii_id2 = MIIBUS_READREG(dev, ma.mii_phyno, MII_PHYIDR2); ma.mii_offset = ivars->mii_offset; args = malloc(sizeof(struct mii_attach_args), M_DEVBUF, M_NOWAIT); if (args == NULL) goto skip; bcopy((char *)&ma, (char *)args, sizeof(ma)); phy = device_add_child(*miibus, NULL, -1); if (phy == NULL) { free(args, M_DEVBUF); goto skip; } device_set_ivars(phy, args); skip: ivars->mii_offset++; } free(children, M_TEMP); if (first != 0) { rv = device_set_driver(*miibus, &miibus_driver); if (rv != 0) goto fail; bus_enumerate_hinted_children(*miibus); rv = device_get_children(*miibus, &children, &nchildren); if (rv != 0) goto fail; free(children, M_TEMP); if (nchildren == 0) { rv = ENXIO; goto fail; } rv = bus_generic_attach(dev); if (rv != 0) goto fail; /* Attaching of the PHY drivers is done in miibus_attach(). */ return (0); } rv = bus_generic_attach(*miibus); if (rv != 0) goto fail; return (0); fail: if (*miibus != NULL) device_delete_child(dev, *miibus); free(ivars, M_DEVBUF); if (first != 0) *miibus = NULL; return (rv); } /* * Media changed; notify all PHYs. */ int mii_mediachg(struct mii_data *mii) { struct mii_softc *child; struct ifmedia_entry *ife = mii->mii_media.ifm_cur; int rv; mii->mii_media_status = 0; mii->mii_media_active = IFM_NONE; LIST_FOREACH(child, &mii->mii_phys, mii_list) { /* * If the media indicates a different PHY instance, * isolate this one. */ if (IFM_INST(ife->ifm_media) != child->mii_inst) { if ((child->mii_flags & MIIF_NOISOLATE) != 0) { device_printf(child->mii_dev, "%s: " "can't handle non-zero PHY instance %d\n", __func__, child->mii_inst); continue; } PHY_WRITE(child, MII_BMCR, PHY_READ(child, MII_BMCR) | BMCR_ISO); continue; } rv = PHY_SERVICE(child, mii, MII_MEDIACHG); if (rv) return (rv); } return (0); } /* * Call the PHY tick routines, used during autonegotiation. */ void mii_tick(struct mii_data *mii) { struct mii_softc *child; struct ifmedia_entry *ife = mii->mii_media.ifm_cur; LIST_FOREACH(child, &mii->mii_phys, mii_list) { /* * If this PHY instance isn't currently selected, just skip * it. */ if (IFM_INST(ife->ifm_media) != child->mii_inst) continue; (void)PHY_SERVICE(child, mii, MII_TICK); } } /* * Get media status from PHYs. */ void mii_pollstat(struct mii_data *mii) { struct mii_softc *child; struct ifmedia_entry *ife = mii->mii_media.ifm_cur; mii->mii_media_status = 0; mii->mii_media_active = IFM_NONE; LIST_FOREACH(child, &mii->mii_phys, mii_list) { /* * If we're not polling this PHY instance, just skip it. 
*/ if (IFM_INST(ife->ifm_media) != child->mii_inst) continue; (void)PHY_SERVICE(child, mii, MII_POLLSTAT); } } static unsigned char mii_bitreverse(unsigned char x) { static unsigned const char nibbletab[16] = { 0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15 }; return ((nibbletab[x & 15] << 4) | nibbletab[x >> 4]); } u_int mii_oui(u_int id1, u_int id2) { u_int h; h = (id1 << 6) | (id2 >> 10); return ((mii_bitreverse(h >> 16) << 16) | (mii_bitreverse((h >> 8) & 0xff) << 8) | mii_bitreverse(h & 0xff)); } int mii_phy_mac_match(struct mii_softc *mii, const char *name) { return (strcmp(device_get_name(device_get_parent(mii->mii_dev)), name) == 0); } int mii_dev_mac_match(device_t parent, const char *name) { return (strcmp(device_get_name(device_get_parent( device_get_parent(parent))), name) == 0); } void * mii_phy_mac_softc(struct mii_softc *mii) { return (device_get_softc(device_get_parent(mii->mii_dev))); } void * mii_dev_mac_softc(device_t parent) { return (device_get_softc(device_get_parent(device_get_parent(parent)))); } diff --git a/sys/dev/mmc/mmc.c b/sys/dev/mmc/mmc.c index ddbc2669b1f7..a55ec671bdb0 100644 --- a/sys/dev/mmc/mmc.c +++ b/sys/dev/mmc/mmc.c @@ -1,2582 +1,2581 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Bernd Walter. All rights reserved. * Copyright (c) 2006 M. Warner Losh * Copyright (c) 2017 Marius Strobl * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Portions of this software may have been developed with reference to * the SD Simplified Specification. The following disclaimer may apply: * * The following conditions apply to the release of the simplified * specification ("Simplified Specification") by the SD Card Association and * the SD Group. The Simplified Specification is a subset of the complete SD * Specification which is owned by the SD Card Association and the SD * Group. This Simplified Specification is provided on a non-confidential * basis subject to the disclaimers below. Any implementation of the * Simplified Specification may require a license from the SD Card * Association, SD Group, SD-3C LLC or other third parties. 
* * Disclaimers: * * The information contained in the Simplified Specification is presented only * as a standard specification for SD Cards and SD Host/Ancillary products and * is provided "AS-IS" without any representations or warranties of any * kind. No responsibility is assumed by the SD Group, SD-3C LLC or the SD * Card Association for any damages, any infringements of patents or other * right of the SD Group, SD-3C LLC, the SD Card Association or any third * parties, which may result from its use. No license is granted by * implication, estoppel or otherwise under any patent or other rights of the * SD Group, SD-3C LLC, the SD Card Association or any third party. Nothing * herein shall be construed as an obligation by the SD Group, the SD-3C LLC * or the SD Card Association to disclose or distribute any technical * information, know-how or other confidential information to any third party. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include "mmcbr_if.h" #include "mmcbus_if.h" CTASSERT(bus_timing_max <= sizeof(uint32_t) * NBBY); /* * Per-card data */ struct mmc_ivars { uint32_t raw_cid[4]; /* Raw bits of the CID */ uint32_t raw_csd[4]; /* Raw bits of the CSD */ uint32_t raw_scr[2]; /* Raw bits of the SCR */ uint8_t raw_ext_csd[MMC_EXTCSD_SIZE]; /* Raw bits of the EXT_CSD */ uint32_t raw_sd_status[16]; /* Raw bits of the SD_STATUS */ uint16_t rca; u_char read_only; /* True when the device is read-only */ u_char high_cap; /* High Capacity device (block addressed) */ enum mmc_card_mode mode; enum mmc_bus_width bus_width; /* Bus width to use */ struct mmc_cid cid; /* cid decoded */ struct mmc_csd csd; /* csd decoded */ struct mmc_scr scr; /* scr decoded */ struct mmc_sd_status sd_status; /* SD_STATUS decoded */ uint32_t sec_count; /* Card capacity in 512byte blocks */ uint32_t timings; /* Mask of bus timings supported */ uint32_t vccq_120; /* Mask of bus timings at VCCQ of 1.2 V */ uint32_t vccq_180; /* Mask of bus timings at VCCQ of 1.8 V */ uint32_t tran_speed; /* Max speed in normal mode */ uint32_t hs_tran_speed; /* Max speed in high speed mode */ uint32_t erase_sector; /* Card native erase sector size */ uint32_t cmd6_time; /* Generic switch timeout [us] */ uint32_t quirks; /* Quirks as per mmc_quirk->quirks */ char card_id_string[64];/* Formatted CID info (serial, MFG, etc) */ char card_sn_string[16];/* Formatted serial # for disk->d_ident */ }; #define CMD_RETRIES 3 static const struct mmc_quirk mmc_quirks[] = { /* * For some SanDisk iNAND devices, the CMD38 argument needs to be * provided in EXT_CSD[113]. */ { 0x2, 0x100, "SEM02G", MMC_QUIRK_INAND_CMD38 }, { 0x2, 0x100, "SEM04G", MMC_QUIRK_INAND_CMD38 }, { 0x2, 0x100, "SEM08G", MMC_QUIRK_INAND_CMD38 }, { 0x2, 0x100, "SEM16G", MMC_QUIRK_INAND_CMD38 }, { 0x2, 0x100, "SEM32G", MMC_QUIRK_INAND_CMD38 }, /* * Disable TRIM for Kingston eMMCs where a firmware bug can lead to * unrecoverable data corruption. 
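[Editor's note: a sketch of how such a quirk table is typically matched against a card's decoded CID (the real lookup lives elsewhere in mmc.c; this is illustrative only), with MMC_QUIRK_MID_ANY/MMC_QUIRK_OID_ANY acting as wildcards:

	for (quirk = mmc_quirks; quirk->mid != 0x0; quirk++) {
		if ((quirk->mid == MMC_QUIRK_MID_ANY ||
		    quirk->mid == cid->mid) &&
		    (quirk->oid == MMC_QUIRK_OID_ANY ||
		    quirk->oid == cid->oid) &&
		    strncmp(cid->pnm, quirk->pnm, sizeof(cid->pnm)) == 0)
			ivar->quirks |= quirk->quirks;
	}
]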
*/ { 0x70, MMC_QUIRK_OID_ANY, "V10008", MMC_QUIRK_BROKEN_TRIM }, { 0x70, MMC_QUIRK_OID_ANY, "V10016", MMC_QUIRK_BROKEN_TRIM }, { 0x0, 0x0, NULL, 0x0 } }; static SYSCTL_NODE(_hw, OID_AUTO, mmc, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "mmc driver"); static int mmc_debug; SYSCTL_INT(_hw_mmc, OID_AUTO, debug, CTLFLAG_RWTUN, &mmc_debug, 0, "Debug level"); /* bus entry points */ static int mmc_acquire_bus(device_t busdev, device_t dev); static int mmc_attach(device_t dev); -static int mmc_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen); +static int mmc_child_location(device_t dev, device_t child, struct sbuf *sb); static int mmc_detach(device_t dev); static int mmc_probe(device_t dev); static int mmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result); static int mmc_release_bus(device_t busdev, device_t dev); static int mmc_resume(device_t dev); static void mmc_retune_pause(device_t busdev, device_t dev, bool retune); static void mmc_retune_unpause(device_t busdev, device_t dev); static int mmc_suspend(device_t dev); static int mmc_wait_for_request(device_t busdev, device_t dev, struct mmc_request *req); static int mmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value); #define MMC_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define MMC_LOCK_INIT(_sc) \ mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->dev), \ "mmc", MTX_DEF) #define MMC_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx); #define MMC_ASSERT_LOCKED(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED); #define MMC_ASSERT_UNLOCKED(_sc) mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED); static int mmc_all_send_cid(struct mmc_softc *sc, uint32_t *rawcid); static void mmc_app_decode_scr(uint32_t *raw_scr, struct mmc_scr *scr); static void mmc_app_decode_sd_status(uint32_t *raw_sd_status, struct mmc_sd_status *sd_status); static int mmc_app_sd_status(struct mmc_softc *sc, uint16_t rca, uint32_t *rawsdstatus); static int mmc_app_send_scr(struct mmc_softc *sc, uint16_t rca, uint32_t *rawscr); static int mmc_calculate_clock(struct mmc_softc *sc); static void mmc_decode_cid_mmc(uint32_t *raw_cid, struct mmc_cid *cid, bool is_4_41p); static void mmc_decode_cid_sd(uint32_t *raw_cid, struct mmc_cid *cid); static void mmc_decode_csd_mmc(uint32_t *raw_csd, struct mmc_csd *csd); static int mmc_decode_csd_sd(uint32_t *raw_csd, struct mmc_csd *csd); static void mmc_delayed_attach(void *xsc); static int mmc_delete_cards(struct mmc_softc *sc, bool final); static void mmc_discover_cards(struct mmc_softc *sc); static void mmc_format_card_id_string(struct mmc_ivars *ivar); static void mmc_go_discovery(struct mmc_softc *sc); static uint32_t mmc_get_bits(uint32_t *bits, int bit_len, int start, int size); static int mmc_highest_voltage(uint32_t ocr); static bool mmc_host_timing(device_t dev, enum mmc_bus_timing timing); static void mmc_idle_cards(struct mmc_softc *sc); static void mmc_ms_delay(int ms); static void mmc_log_card(device_t dev, struct mmc_ivars *ivar, int newcard); static void mmc_power_down(struct mmc_softc *sc); static void mmc_power_up(struct mmc_softc *sc); static void mmc_rescan_cards(struct mmc_softc *sc); static int mmc_retune(device_t busdev, device_t dev, bool reset); static void mmc_scan(struct mmc_softc *sc); static int mmc_sd_switch(struct mmc_softc *sc, uint8_t mode, uint8_t grp, uint8_t value, uint8_t *res); static int mmc_select_card(struct mmc_softc *sc, uint16_t rca); static uint32_t mmc_select_vdd(struct mmc_softc *sc, uint32_t ocr); static int 
mmc_send_app_op_cond(struct mmc_softc *sc, uint32_t ocr, uint32_t *rocr); static int mmc_send_csd(struct mmc_softc *sc, uint16_t rca, uint32_t *rawcsd); static int mmc_send_if_cond(struct mmc_softc *sc, uint8_t vhs); static int mmc_send_op_cond(struct mmc_softc *sc, uint32_t ocr, uint32_t *rocr); static int mmc_send_relative_addr(struct mmc_softc *sc, uint32_t *resp); static int mmc_set_blocklen(struct mmc_softc *sc, uint32_t len); static int mmc_set_card_bus_width(struct mmc_softc *sc, struct mmc_ivars *ivar, enum mmc_bus_timing timing); static int mmc_set_power_class(struct mmc_softc *sc, struct mmc_ivars *ivar); static int mmc_set_relative_addr(struct mmc_softc *sc, uint16_t resp); static int mmc_set_timing(struct mmc_softc *sc, struct mmc_ivars *ivar, enum mmc_bus_timing timing); static int mmc_set_vccq(struct mmc_softc *sc, struct mmc_ivars *ivar, enum mmc_bus_timing timing); static int mmc_switch_to_hs200(struct mmc_softc *sc, struct mmc_ivars *ivar, uint32_t clock); static int mmc_switch_to_hs400(struct mmc_softc *sc, struct mmc_ivars *ivar, uint32_t max_dtr, enum mmc_bus_timing max_timing); static int mmc_test_bus_width(struct mmc_softc *sc); static uint32_t mmc_timing_to_dtr(struct mmc_ivars *ivar, enum mmc_bus_timing timing); static const char *mmc_timing_to_string(enum mmc_bus_timing timing); static void mmc_update_child_list(struct mmc_softc *sc); static int mmc_wait_for_command(struct mmc_softc *sc, uint32_t opcode, uint32_t arg, uint32_t flags, uint32_t *resp, int retries); static int mmc_wait_for_req(struct mmc_softc *sc, struct mmc_request *req); static void mmc_wakeup(struct mmc_request *req); static void mmc_ms_delay(int ms) { DELAY(1000 * ms); /* XXX BAD */ } static int mmc_probe(device_t dev) { device_set_desc(dev, "MMC/SD bus"); return (0); } static int mmc_attach(device_t dev) { struct mmc_softc *sc; sc = device_get_softc(dev); sc->dev = dev; MMC_LOCK_INIT(sc); /* We'll probe and attach our children later, but before / mount */ sc->config_intrhook.ich_func = mmc_delayed_attach; sc->config_intrhook.ich_arg = sc; if (config_intrhook_establish(&sc->config_intrhook) != 0) device_printf(dev, "config_intrhook_establish failed\n"); return (0); } static int mmc_detach(device_t dev) { struct mmc_softc *sc = device_get_softc(dev); int err; err = mmc_delete_cards(sc, true); if (err != 0) return (err); mmc_power_down(sc); MMC_LOCK_DESTROY(sc); return (0); } static int mmc_suspend(device_t dev) { struct mmc_softc *sc = device_get_softc(dev); int err; err = bus_generic_suspend(dev); if (err != 0) return (err); /* * We power down with the bus acquired here, mainly so that no device * is selected any longer and sc->last_rca gets set to 0. Otherwise, * the deselect as part of the bus acquisition in mmc_scan() may fail * during resume, as the bus isn't powered up again before later in * mmc_go_discovery(). 
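[Editor's note: the acquire/power-down/release sequence below relies on the same bus serialization that child drivers such as mmcsd(4) go through via the mmcbus interface; the usual pattern around any card access is:

	MMCBUS_ACQUIRE_BUS(device_get_parent(dev), dev);
	err = MMCBUS_WAIT_FOR_REQUEST(device_get_parent(dev), dev, &req);
	MMCBUS_RELEASE_BUS(device_get_parent(dev), dev);
]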
err = mmc_acquire_bus(dev, dev); if (err != 0) return (err); mmc_power_down(sc); err = mmc_release_bus(dev, dev); return (err); } static int mmc_resume(device_t dev) { struct mmc_softc *sc = device_get_softc(dev); mmc_scan(sc); return (bus_generic_resume(dev)); } static int mmc_acquire_bus(device_t busdev, device_t dev) { struct mmc_softc *sc; struct mmc_ivars *ivar; int err; uint16_t rca; enum mmc_bus_timing timing; err = MMCBR_ACQUIRE_HOST(device_get_parent(busdev), busdev); if (err) return (err); sc = device_get_softc(busdev); MMC_LOCK(sc); if (sc->owner) panic("mmc: host bridge didn't serialize us."); sc->owner = dev; MMC_UNLOCK(sc); if (busdev != dev) { /* * Keep track of the last rca that we've selected. If * we're asked to do it again, don't. We never * unselect unless the bus code itself wants the mmc * bus, and constantly reselecting causes problems. */ ivar = device_get_ivars(dev); rca = ivar->rca; if (sc->last_rca != rca) { if (mmc_select_card(sc, rca) != MMC_ERR_NONE) { device_printf(busdev, "Card at relative " "address %d failed to select\n", rca); return (ENXIO); } sc->last_rca = rca; timing = mmcbr_get_timing(busdev); /* * For eMMC modes, setting/updating bus width and VCCQ * only really is necessary if there actually is more * than one device on the bus as generally that already * had to be done by mmc_calculate_clock() or one of * its callees. Moreover, setting the bus width anew * can trigger re-tuning (via a CRC error on the next * CMD), even if not switching between devices and the * previously selected one is still tuned. Obviously, * we need to re-tune the host controller if devices * are actually switched, though. */ if (timing >= bus_timing_mmc_ddr52 && sc->child_count == 1) return (0); /* Prepare bus width for the new card. */ if (bootverbose || mmc_debug) { device_printf(busdev, "setting bus width to %d bits %s timing\n", (ivar->bus_width == bus_width_4) ? 4 : (ivar->bus_width == bus_width_8) ? 8 : 1, mmc_timing_to_string(timing)); } if (mmc_set_card_bus_width(sc, ivar, timing) != MMC_ERR_NONE) { device_printf(busdev, "Card at relative " "address %d failed to set bus width\n", rca); return (ENXIO); } mmcbr_set_bus_width(busdev, ivar->bus_width); mmcbr_update_ios(busdev); if (mmc_set_vccq(sc, ivar, timing) != MMC_ERR_NONE) { device_printf(busdev, "Failed to set VCCQ " "for card at relative address %d\n", rca); return (ENXIO); } if (timing >= bus_timing_mmc_hs200 && mmc_retune(busdev, dev, true) != 0) { device_printf(busdev, "Card at relative " "address %d failed to re-tune\n", rca); return (ENXIO); } } } else { /* * If there's a card selected, stand down. */ if (sc->last_rca != 0) { if (mmc_select_card(sc, 0) != MMC_ERR_NONE) return (ENXIO); sc->last_rca = 0; } } return (0); } static int mmc_release_bus(device_t busdev, device_t dev) { struct mmc_softc *sc; int err; sc = device_get_softc(busdev); MMC_LOCK(sc); if (!sc->owner) panic("mmc: releasing unowned bus."); if (sc->owner != dev) panic("mmc: you don't own the bus. 
game over."); MMC_UNLOCK(sc); err = MMCBR_RELEASE_HOST(device_get_parent(busdev), busdev); if (err) return (err); MMC_LOCK(sc); sc->owner = NULL; MMC_UNLOCK(sc); return (0); } static uint32_t mmc_select_vdd(struct mmc_softc *sc, uint32_t ocr) { return (ocr & MMC_OCR_VOLTAGE); } static int mmc_highest_voltage(uint32_t ocr) { int i; for (i = MMC_OCR_MAX_VOLTAGE_SHIFT; i >= MMC_OCR_MIN_VOLTAGE_SHIFT; i--) if (ocr & (1 << i)) return (i); return (-1); } static void mmc_wakeup(struct mmc_request *req) { struct mmc_softc *sc; sc = (struct mmc_softc *)req->done_data; MMC_LOCK(sc); req->flags |= MMC_REQ_DONE; MMC_UNLOCK(sc); wakeup(req); } static int mmc_wait_for_req(struct mmc_softc *sc, struct mmc_request *req) { req->done = mmc_wakeup; req->done_data = sc; if (__predict_false(mmc_debug > 1)) { device_printf(sc->dev, "REQUEST: CMD%d arg %#x flags %#x", req->cmd->opcode, req->cmd->arg, req->cmd->flags); if (req->cmd->data) { printf(" data %d\n", (int)req->cmd->data->len); } else printf("\n"); } MMCBR_REQUEST(device_get_parent(sc->dev), sc->dev, req); MMC_LOCK(sc); while ((req->flags & MMC_REQ_DONE) == 0) msleep(req, &sc->sc_mtx, 0, "mmcreq", 0); MMC_UNLOCK(sc); if (__predict_false(mmc_debug > 2 || (mmc_debug > 0 && req->cmd->error != MMC_ERR_NONE))) device_printf(sc->dev, "CMD%d RESULT: %d\n", req->cmd->opcode, req->cmd->error); return (0); } static int mmc_wait_for_request(device_t busdev, device_t dev, struct mmc_request *req) { struct mmc_softc *sc; struct mmc_ivars *ivar; int err, i; enum mmc_retune_req retune_req; sc = device_get_softc(busdev); KASSERT(sc->owner != NULL, ("%s: Request from %s without bus being acquired.", __func__, device_get_nameunit(dev))); /* * Unless no device is selected or re-tuning is already ongoing, * execute re-tuning if a) the bridge is requesting to do so and * re-tuning hasn't been otherwise paused, or b) if a child asked * to be re-tuned prior to pausing (see also mmc_retune_pause()). */ if (__predict_false(sc->last_rca != 0 && sc->retune_ongoing == 0 && (((retune_req = mmcbr_get_retune_req(busdev)) != retune_req_none && sc->retune_paused == 0) || sc->retune_needed == 1))) { if (__predict_false(mmc_debug > 1)) { device_printf(busdev, "Re-tuning with%s circuit reset required\n", retune_req == retune_req_reset ? 
"" : "out"); } if (device_get_parent(dev) == busdev) ivar = device_get_ivars(dev); else { for (i = 0; i < sc->child_count; i++) { ivar = device_get_ivars(sc->child_list[i]); if (ivar->rca == sc->last_rca) break; } if (ivar->rca != sc->last_rca) return (EINVAL); } sc->retune_ongoing = 1; err = mmc_retune(busdev, dev, retune_req == retune_req_reset); sc->retune_ongoing = 0; switch (err) { case MMC_ERR_NONE: case MMC_ERR_FAILED: /* Re-tune error but still might work */ break; case MMC_ERR_BADCRC: /* Switch failure on HS400 recovery */ return (ENXIO); case MMC_ERR_INVALID: /* Driver implementation b0rken */ default: /* Unknown error, should not happen */ return (EINVAL); } sc->retune_needed = 0; } return (mmc_wait_for_req(sc, req)); } static int mmc_wait_for_command(struct mmc_softc *sc, uint32_t opcode, uint32_t arg, uint32_t flags, uint32_t *resp, int retries) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = opcode; cmd.arg = arg; cmd.flags = flags; cmd.data = NULL; err = mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, retries); if (err) return (err); if (resp) { if (flags & MMC_RSP_136) memcpy(resp, cmd.resp, 4 * sizeof(uint32_t)); else *resp = cmd.resp[0]; } return (0); } static void mmc_idle_cards(struct mmc_softc *sc) { device_t dev; struct mmc_command cmd; dev = sc->dev; mmcbr_set_chip_select(dev, cs_high); mmcbr_update_ios(dev); mmc_ms_delay(1); memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_GO_IDLE_STATE; cmd.arg = 0; cmd.flags = MMC_RSP_NONE | MMC_CMD_BC; cmd.data = NULL; mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, CMD_RETRIES); mmc_ms_delay(1); mmcbr_set_chip_select(dev, cs_dontcare); mmcbr_update_ios(dev); mmc_ms_delay(1); } static int mmc_send_app_op_cond(struct mmc_softc *sc, uint32_t ocr, uint32_t *rocr) { struct mmc_command cmd; int err = MMC_ERR_NONE, i; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = ACMD_SD_SEND_OP_COND; cmd.arg = ocr; cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; cmd.data = NULL; for (i = 0; i < 1000; i++) { err = mmc_wait_for_app_cmd(sc->dev, sc->dev, 0, &cmd, CMD_RETRIES); if (err != MMC_ERR_NONE) break; if ((cmd.resp[0] & MMC_OCR_CARD_BUSY) || (ocr & MMC_OCR_VOLTAGE) == 0) break; err = MMC_ERR_TIMEOUT; mmc_ms_delay(10); } if (rocr && err == MMC_ERR_NONE) *rocr = cmd.resp[0]; return (err); } static int mmc_send_op_cond(struct mmc_softc *sc, uint32_t ocr, uint32_t *rocr) { struct mmc_command cmd; int err = MMC_ERR_NONE, i; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_SEND_OP_COND; cmd.arg = ocr; cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR; cmd.data = NULL; for (i = 0; i < 1000; i++) { err = mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, CMD_RETRIES); if (err != MMC_ERR_NONE) break; if ((cmd.resp[0] & MMC_OCR_CARD_BUSY) || (ocr & MMC_OCR_VOLTAGE) == 0) break; err = MMC_ERR_TIMEOUT; mmc_ms_delay(10); } if (rocr && err == MMC_ERR_NONE) *rocr = cmd.resp[0]; return (err); } static int mmc_send_if_cond(struct mmc_softc *sc, uint8_t vhs) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = SD_SEND_IF_COND; cmd.arg = (vhs << 8) + 0xAA; cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR; cmd.data = NULL; err = mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, CMD_RETRIES); return (err); } static void mmc_power_up(struct mmc_softc *sc) { device_t dev; enum mmc_vccq vccq; dev = sc->dev; mmcbr_set_vdd(dev, mmc_highest_voltage(mmcbr_get_host_ocr(dev))); mmcbr_set_bus_mode(dev, opendrain); mmcbr_set_chip_select(dev, cs_dontcare); mmcbr_set_bus_width(dev, bus_width_1); mmcbr_set_power_mode(dev, power_up); mmcbr_set_clock(dev, 0); mmcbr_update_ios(dev); for (vccq = vccq_330; 
; vccq--) { mmcbr_set_vccq(dev, vccq); if (mmcbr_switch_vccq(dev) == 0 || vccq == vccq_120) break; } mmc_ms_delay(1); mmcbr_set_clock(dev, SD_MMC_CARD_ID_FREQUENCY); mmcbr_set_timing(dev, bus_timing_normal); mmcbr_set_power_mode(dev, power_on); mmcbr_update_ios(dev); mmc_ms_delay(2); } static void mmc_power_down(struct mmc_softc *sc) { device_t dev = sc->dev; mmcbr_set_bus_mode(dev, opendrain); mmcbr_set_chip_select(dev, cs_dontcare); mmcbr_set_bus_width(dev, bus_width_1); mmcbr_set_power_mode(dev, power_off); mmcbr_set_clock(dev, 0); mmcbr_set_timing(dev, bus_timing_normal); mmcbr_update_ios(dev); } static int mmc_select_card(struct mmc_softc *sc, uint16_t rca) { int err, flags; flags = (rca ? MMC_RSP_R1B : MMC_RSP_NONE) | MMC_CMD_AC; sc->retune_paused++; err = mmc_wait_for_command(sc, MMC_SELECT_CARD, (uint32_t)rca << 16, flags, NULL, CMD_RETRIES); sc->retune_paused--; return (err); } static int mmc_sd_switch(struct mmc_softc *sc, uint8_t mode, uint8_t grp, uint8_t value, uint8_t *res) { int err; struct mmc_command cmd; struct mmc_data data; memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); memset(res, 0, 64); cmd.opcode = SD_SWITCH_FUNC; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.arg = mode << 31; /* 0 - check, 1 - set */ cmd.arg |= 0x00FFFFFF; cmd.arg &= ~(0xF << (grp * 4)); cmd.arg |= value << (grp * 4); cmd.data = &data; data.data = res; data.len = 64; data.flags = MMC_DATA_READ; err = mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, CMD_RETRIES); return (err); } static int mmc_set_card_bus_width(struct mmc_softc *sc, struct mmc_ivars *ivar, enum mmc_bus_timing timing) { struct mmc_command cmd; int err; uint8_t value; if (mmcbr_get_mode(sc->dev) == mode_sd) { memset(&cmd, 0, sizeof(cmd)); cmd.opcode = ACMD_SET_CLR_CARD_DETECT; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; cmd.arg = SD_CLR_CARD_DETECT; err = mmc_wait_for_app_cmd(sc->dev, sc->dev, ivar->rca, &cmd, CMD_RETRIES); if (err != 0) return (err); memset(&cmd, 0, sizeof(cmd)); cmd.opcode = ACMD_SET_BUS_WIDTH; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; switch (ivar->bus_width) { case bus_width_1: cmd.arg = SD_BUS_WIDTH_1; break; case bus_width_4: cmd.arg = SD_BUS_WIDTH_4; break; default: return (MMC_ERR_INVALID); } err = mmc_wait_for_app_cmd(sc->dev, sc->dev, ivar->rca, &cmd, CMD_RETRIES); } else { switch (ivar->bus_width) { case bus_width_1: if (timing == bus_timing_mmc_hs400 || timing == bus_timing_mmc_hs400es) return (MMC_ERR_INVALID); value = EXT_CSD_BUS_WIDTH_1; break; case bus_width_4: switch (timing) { case bus_timing_mmc_ddr52: value = EXT_CSD_BUS_WIDTH_4_DDR; break; case bus_timing_mmc_hs400: case bus_timing_mmc_hs400es: return (MMC_ERR_INVALID); default: value = EXT_CSD_BUS_WIDTH_4; break; } break; case bus_width_8: value = 0; switch (timing) { case bus_timing_mmc_hs400es: value = EXT_CSD_BUS_WIDTH_ES; /* FALLTHROUGH */ case bus_timing_mmc_ddr52: case bus_timing_mmc_hs400: value |= EXT_CSD_BUS_WIDTH_8_DDR; break; default: value = EXT_CSD_BUS_WIDTH_8; break; } break; default: return (MMC_ERR_INVALID); } err = mmc_switch(sc->dev, sc->dev, ivar->rca, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, value, ivar->cmd6_time, true); } return (err); } static int mmc_set_power_class(struct mmc_softc *sc, struct mmc_ivars *ivar) { device_t dev; const uint8_t *ext_csd; uint32_t clock; uint8_t value; enum mmc_bus_timing timing; enum mmc_bus_width bus_width; dev = sc->dev; timing = mmcbr_get_timing(dev); bus_width = ivar->bus_width; if (mmcbr_get_mode(dev) != mode_mmc || ivar->csd.spec_vers < 4 || timing == bus_timing_normal || bus_width == 
bus_width_1) return (MMC_ERR_NONE); value = 0; ext_csd = ivar->raw_ext_csd; clock = mmcbr_get_clock(dev); switch (1 << mmcbr_get_vdd(dev)) { case MMC_OCR_LOW_VOLTAGE: if (clock <= MMC_TYPE_HS_26_MAX) value = ext_csd[EXT_CSD_PWR_CL_26_195]; else if (clock <= MMC_TYPE_HS_52_MAX) { if (timing >= bus_timing_mmc_ddr52 && bus_width >= bus_width_4) value = ext_csd[EXT_CSD_PWR_CL_52_195_DDR]; else value = ext_csd[EXT_CSD_PWR_CL_52_195]; } else if (clock <= MMC_TYPE_HS200_HS400ES_MAX) value = ext_csd[EXT_CSD_PWR_CL_200_195]; break; case MMC_OCR_270_280: case MMC_OCR_280_290: case MMC_OCR_290_300: case MMC_OCR_300_310: case MMC_OCR_310_320: case MMC_OCR_320_330: case MMC_OCR_330_340: case MMC_OCR_340_350: case MMC_OCR_350_360: if (clock <= MMC_TYPE_HS_26_MAX) value = ext_csd[EXT_CSD_PWR_CL_26_360]; else if (clock <= MMC_TYPE_HS_52_MAX) { if (timing == bus_timing_mmc_ddr52 && bus_width >= bus_width_4) value = ext_csd[EXT_CSD_PWR_CL_52_360_DDR]; else value = ext_csd[EXT_CSD_PWR_CL_52_360]; } else if (clock <= MMC_TYPE_HS200_HS400ES_MAX) { if (bus_width == bus_width_8) value = ext_csd[EXT_CSD_PWR_CL_200_360_DDR]; else value = ext_csd[EXT_CSD_PWR_CL_200_360]; } break; default: device_printf(dev, "No power class support for VDD 0x%x\n", 1 << mmcbr_get_vdd(dev)); return (MMC_ERR_INVALID); } if (bus_width == bus_width_8) value = (value & EXT_CSD_POWER_CLASS_8BIT_MASK) >> EXT_CSD_POWER_CLASS_8BIT_SHIFT; else value = (value & EXT_CSD_POWER_CLASS_4BIT_MASK) >> EXT_CSD_POWER_CLASS_4BIT_SHIFT; if (value == 0) return (MMC_ERR_NONE); return (mmc_switch(dev, dev, ivar->rca, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_POWER_CLASS, value, ivar->cmd6_time, true)); } static int mmc_set_timing(struct mmc_softc *sc, struct mmc_ivars *ivar, enum mmc_bus_timing timing) { u_char switch_res[64]; uint8_t value; int err; if (mmcbr_get_mode(sc->dev) == mode_sd) { switch (timing) { case bus_timing_normal: value = SD_SWITCH_NORMAL_MODE; break; case bus_timing_hs: value = SD_SWITCH_HS_MODE; break; default: return (MMC_ERR_INVALID); } err = mmc_sd_switch(sc, SD_SWITCH_MODE_SET, SD_SWITCH_GROUP1, value, switch_res); if (err != MMC_ERR_NONE) return (err); if ((switch_res[16] & 0xf) != value) return (MMC_ERR_FAILED); mmcbr_set_timing(sc->dev, timing); mmcbr_update_ios(sc->dev); } else { switch (timing) { case bus_timing_normal: value = EXT_CSD_HS_TIMING_BC; break; case bus_timing_hs: case bus_timing_mmc_ddr52: value = EXT_CSD_HS_TIMING_HS; break; case bus_timing_mmc_hs200: value = EXT_CSD_HS_TIMING_HS200; break; case bus_timing_mmc_hs400: case bus_timing_mmc_hs400es: value = EXT_CSD_HS_TIMING_HS400; break; default: return (MMC_ERR_INVALID); } err = mmc_switch(sc->dev, sc->dev, ivar->rca, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, value, ivar->cmd6_time, false); if (err != MMC_ERR_NONE) return (err); mmcbr_set_timing(sc->dev, timing); mmcbr_update_ios(sc->dev); err = mmc_switch_status(sc->dev, sc->dev, ivar->rca, ivar->cmd6_time); } return (err); } static int mmc_set_vccq(struct mmc_softc *sc, struct mmc_ivars *ivar, enum mmc_bus_timing timing) { if (isset(&ivar->vccq_120, timing)) mmcbr_set_vccq(sc->dev, vccq_120); else if (isset(&ivar->vccq_180, timing)) mmcbr_set_vccq(sc->dev, vccq_180); else mmcbr_set_vccq(sc->dev, vccq_330); if (mmcbr_switch_vccq(sc->dev) != 0) return (MMC_ERR_INVALID); else return (MMC_ERR_NONE); } static const uint8_t p8[8] = { 0x55, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const uint8_t p8ok[8] = { 0xAA, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const uint8_t p4[4] = { 0x5A, 0x00, 0x00, 0x00 }; static 
const uint8_t p4ok[4] = { 0xA5, 0x00, 0x00, 0x00 }; static int mmc_test_bus_width(struct mmc_softc *sc) { struct mmc_command cmd; struct mmc_data data; uint8_t buf[8]; int err; if (mmcbr_get_caps(sc->dev) & MMC_CAP_8_BIT_DATA) { mmcbr_set_bus_width(sc->dev, bus_width_8); mmcbr_update_ios(sc->dev); sc->squelched++; /* Errors are expected, squelch reporting. */ memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); cmd.opcode = MMC_BUSTEST_W; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.data = &data; data.data = __DECONST(void *, p8); data.len = 8; data.flags = MMC_DATA_WRITE; mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, 0); memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); cmd.opcode = MMC_BUSTEST_R; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.data = &data; data.data = buf; data.len = 8; data.flags = MMC_DATA_READ; err = mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, 0); sc->squelched--; mmcbr_set_bus_width(sc->dev, bus_width_1); mmcbr_update_ios(sc->dev); if (err == MMC_ERR_NONE && memcmp(buf, p8ok, 8) == 0) return (bus_width_8); } if (mmcbr_get_caps(sc->dev) & MMC_CAP_4_BIT_DATA) { mmcbr_set_bus_width(sc->dev, bus_width_4); mmcbr_update_ios(sc->dev); sc->squelched++; /* Errors are expected, squelch reporting. */ memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); cmd.opcode = MMC_BUSTEST_W; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.data = &data; data.data = __DECONST(void *, p4); data.len = 4; data.flags = MMC_DATA_WRITE; mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, 0); memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); cmd.opcode = MMC_BUSTEST_R; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.data = &data; data.data = buf; data.len = 4; data.flags = MMC_DATA_READ; err = mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, 0); sc->squelched--; mmcbr_set_bus_width(sc->dev, bus_width_1); mmcbr_update_ios(sc->dev); if (err == MMC_ERR_NONE && memcmp(buf, p4ok, 4) == 0) return (bus_width_4); } return (bus_width_1); } static uint32_t mmc_get_bits(uint32_t *bits, int bit_len, int start, int size) { const int i = (bit_len / 32) - (start / 32) - 1; const int shift = start & 31; uint32_t retval = bits[i] >> shift; if (size + shift > 32) retval |= bits[i - 1] << (32 - shift); return (retval & ((1llu << size) - 1)); } static void mmc_decode_cid_sd(uint32_t *raw_cid, struct mmc_cid *cid) { int i; /* There's no version info, so we take it on faith */ memset(cid, 0, sizeof(*cid)); cid->mid = mmc_get_bits(raw_cid, 128, 120, 8); cid->oid = mmc_get_bits(raw_cid, 128, 104, 16); for (i = 0; i < 5; i++) cid->pnm[i] = mmc_get_bits(raw_cid, 128, 96 - i * 8, 8); cid->pnm[5] = 0; cid->prv = mmc_get_bits(raw_cid, 128, 56, 8); cid->psn = mmc_get_bits(raw_cid, 128, 24, 32); cid->mdt_year = mmc_get_bits(raw_cid, 128, 12, 8) + 2000; cid->mdt_month = mmc_get_bits(raw_cid, 128, 8, 4); } static void mmc_decode_cid_mmc(uint32_t *raw_cid, struct mmc_cid *cid, bool is_4_41p) { int i; /* There's no version info, so we take it on faith */ memset(cid, 0, sizeof(*cid)); cid->mid = mmc_get_bits(raw_cid, 128, 120, 8); cid->oid = mmc_get_bits(raw_cid, 128, 104, 8); for (i = 0; i < 6; i++) cid->pnm[i] = mmc_get_bits(raw_cid, 128, 96 - i * 8, 8); cid->pnm[6] = 0; cid->prv = mmc_get_bits(raw_cid, 128, 48, 8); cid->psn = mmc_get_bits(raw_cid, 128, 16, 32); cid->mdt_month = mmc_get_bits(raw_cid, 128, 12, 4); cid->mdt_year = mmc_get_bits(raw_cid, 128, 8, 4); if (is_4_41p) cid->mdt_year += 2013; else cid->mdt_year += 1997; } static void mmc_format_card_id_string(struct mmc_ivars 
*ivar) { char oidstr[8]; uint8_t c1; uint8_t c2; /* * Format a card ID string for use by the mmcsd driver, it's what * appears between the <> in the following: * mmcsd0: 968MB at mmc0 * 22.5MHz/4bit/128-block * * Also format just the card serial number, which the mmcsd driver will * use as the disk->d_ident string. * * The card_id_string in mmc_ivars is currently allocated as 64 bytes, * and our max formatted length is currently 55 bytes if every field * contains the largest value. * * Sometimes the oid is two printable ascii chars; when it's not, * format it as 0xnnnn instead. */ c1 = (ivar->cid.oid >> 8) & 0x0ff; c2 = ivar->cid.oid & 0x0ff; if (c1 > 0x1f && c1 < 0x7f && c2 > 0x1f && c2 < 0x7f) snprintf(oidstr, sizeof(oidstr), "%c%c", c1, c2); else snprintf(oidstr, sizeof(oidstr), "0x%04x", ivar->cid.oid); snprintf(ivar->card_sn_string, sizeof(ivar->card_sn_string), "%08X", ivar->cid.psn); snprintf(ivar->card_id_string, sizeof(ivar->card_id_string), "%s%s %s %d.%d SN %08X MFG %02d/%04d by %d %s", ivar->mode == mode_sd ? "SD" : "MMC", ivar->high_cap ? "HC" : "", ivar->cid.pnm, ivar->cid.prv >> 4, ivar->cid.prv & 0x0f, ivar->cid.psn, ivar->cid.mdt_month, ivar->cid.mdt_year, ivar->cid.mid, oidstr); } static const int exp[8] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000 }; static const int mant[16] = { 0, 10, 12, 13, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 70, 80 }; static const int cur_min[8] = { 500, 1000, 5000, 10000, 25000, 35000, 60000, 100000 }; static const int cur_max[8] = { 1000, 5000, 10000, 25000, 35000, 45000, 80000, 200000 }; static int mmc_decode_csd_sd(uint32_t *raw_csd, struct mmc_csd *csd) { int v; int m; int e; memset(csd, 0, sizeof(*csd)); csd->csd_structure = v = mmc_get_bits(raw_csd, 128, 126, 2); if (v == 0) { m = mmc_get_bits(raw_csd, 128, 115, 4); e = mmc_get_bits(raw_csd, 128, 112, 3); csd->tacc = (exp[e] * mant[m] + 9) / 10; csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100; m = mmc_get_bits(raw_csd, 128, 99, 4); e = mmc_get_bits(raw_csd, 128, 96, 3); csd->tran_speed = exp[e] * 10000 * mant[m]; csd->ccc = mmc_get_bits(raw_csd, 128, 84, 12); csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4); csd->read_bl_partial = mmc_get_bits(raw_csd, 128, 79, 1); csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1); csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1); csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1); csd->vdd_r_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 59, 3)]; csd->vdd_r_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 56, 3)]; csd->vdd_w_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 53, 3)]; csd->vdd_w_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 50, 3)]; m = mmc_get_bits(raw_csd, 128, 62, 12); e = mmc_get_bits(raw_csd, 128, 47, 3); csd->capacity = ((1 + m) << (e + 2)) * csd->read_bl_len; csd->erase_blk_en = mmc_get_bits(raw_csd, 128, 46, 1); csd->erase_sector = mmc_get_bits(raw_csd, 128, 39, 7) + 1; csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 7); csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1); csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); return (MMC_ERR_NONE); } else if (v == 1) { m = mmc_get_bits(raw_csd, 128, 115, 4); e = mmc_get_bits(raw_csd, 128, 112, 3); csd->tacc = (exp[e] * mant[m] + 9) / 10; csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100; m = mmc_get_bits(raw_csd, 128, 99, 4); e = mmc_get_bits(raw_csd, 128, 96, 3); csd->tran_speed = exp[e] * 10000 * mant[m]; csd->ccc = 
mmc_get_bits(raw_csd, 128, 84, 12); csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4); csd->read_bl_partial = mmc_get_bits(raw_csd, 128, 79, 1); csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1); csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1); csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1); csd->capacity = ((uint64_t)mmc_get_bits(raw_csd, 128, 48, 22) + 1) * 512 * 1024; csd->erase_blk_en = mmc_get_bits(raw_csd, 128, 46, 1); csd->erase_sector = mmc_get_bits(raw_csd, 128, 39, 7) + 1; csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 7); csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1); csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); return (MMC_ERR_NONE); } return (MMC_ERR_INVALID); } static void mmc_decode_csd_mmc(uint32_t *raw_csd, struct mmc_csd *csd) { int m; int e; memset(csd, 0, sizeof(*csd)); csd->csd_structure = mmc_get_bits(raw_csd, 128, 126, 2); csd->spec_vers = mmc_get_bits(raw_csd, 128, 122, 4); m = mmc_get_bits(raw_csd, 128, 115, 4); e = mmc_get_bits(raw_csd, 128, 112, 3); csd->tacc = (exp[e] * mant[m] + 9) / 10; csd->nsac = mmc_get_bits(raw_csd, 128, 104, 8) * 100; m = mmc_get_bits(raw_csd, 128, 99, 4); e = mmc_get_bits(raw_csd, 128, 96, 3); csd->tran_speed = exp[e] * 10000 * mant[m]; csd->ccc = mmc_get_bits(raw_csd, 128, 84, 12); csd->read_bl_len = 1 << mmc_get_bits(raw_csd, 128, 80, 4); csd->read_bl_partial = mmc_get_bits(raw_csd, 128, 79, 1); csd->write_blk_misalign = mmc_get_bits(raw_csd, 128, 78, 1); csd->read_blk_misalign = mmc_get_bits(raw_csd, 128, 77, 1); csd->dsr_imp = mmc_get_bits(raw_csd, 128, 76, 1); csd->vdd_r_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 59, 3)]; csd->vdd_r_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 56, 3)]; csd->vdd_w_curr_min = cur_min[mmc_get_bits(raw_csd, 128, 53, 3)]; csd->vdd_w_curr_max = cur_max[mmc_get_bits(raw_csd, 128, 50, 3)]; m = mmc_get_bits(raw_csd, 128, 62, 12); e = mmc_get_bits(raw_csd, 128, 47, 3); csd->capacity = ((1 + m) << (e + 2)) * csd->read_bl_len; csd->erase_blk_en = 0; csd->erase_sector = (mmc_get_bits(raw_csd, 128, 42, 5) + 1) * (mmc_get_bits(raw_csd, 128, 37, 5) + 1); csd->wp_grp_size = mmc_get_bits(raw_csd, 128, 32, 5); csd->wp_grp_enable = mmc_get_bits(raw_csd, 128, 31, 1); csd->r2w_factor = 1 << mmc_get_bits(raw_csd, 128, 26, 3); csd->write_bl_len = 1 << mmc_get_bits(raw_csd, 128, 22, 4); csd->write_bl_partial = mmc_get_bits(raw_csd, 128, 21, 1); } static void mmc_app_decode_scr(uint32_t *raw_scr, struct mmc_scr *scr) { unsigned int scr_struct; memset(scr, 0, sizeof(*scr)); scr_struct = mmc_get_bits(raw_scr, 64, 60, 4); if (scr_struct != 0) { printf("Unrecognised SCR structure version %d\n", scr_struct); return; } scr->sda_vsn = mmc_get_bits(raw_scr, 64, 56, 4); scr->bus_widths = mmc_get_bits(raw_scr, 64, 48, 4); } static void mmc_app_decode_sd_status(uint32_t *raw_sd_status, struct mmc_sd_status *sd_status) { memset(sd_status, 0, sizeof(*sd_status)); sd_status->bus_width = mmc_get_bits(raw_sd_status, 512, 510, 2); sd_status->secured_mode = mmc_get_bits(raw_sd_status, 512, 509, 1); sd_status->card_type = mmc_get_bits(raw_sd_status, 512, 480, 16); sd_status->prot_area = mmc_get_bits(raw_sd_status, 512, 448, 12); sd_status->speed_class = mmc_get_bits(raw_sd_status, 512, 440, 8); sd_status->perf_move = mmc_get_bits(raw_sd_status, 512, 432, 8); sd_status->au_size = mmc_get_bits(raw_sd_status, 512, 428, 4); sd_status->erase_size = 
mmc_get_bits(raw_sd_status, 512, 408, 16); sd_status->erase_timeout = mmc_get_bits(raw_sd_status, 512, 402, 6); sd_status->erase_offset = mmc_get_bits(raw_sd_status, 512, 400, 2); } static int mmc_all_send_cid(struct mmc_softc *sc, uint32_t *rawcid) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_ALL_SEND_CID; cmd.arg = 0; cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; cmd.data = NULL; err = mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, CMD_RETRIES); memcpy(rawcid, cmd.resp, 4 * sizeof(uint32_t)); return (err); } static int mmc_send_csd(struct mmc_softc *sc, uint16_t rca, uint32_t *rawcsd) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_SEND_CSD; cmd.arg = rca << 16; cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; cmd.data = NULL; err = mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, CMD_RETRIES); memcpy(rawcsd, cmd.resp, 4 * sizeof(uint32_t)); return (err); } static int mmc_app_send_scr(struct mmc_softc *sc, uint16_t rca, uint32_t *rawscr) { int err; struct mmc_command cmd; struct mmc_data data; memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); memset(rawscr, 0, 8); cmd.opcode = ACMD_SEND_SCR; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.arg = 0; cmd.data = &data; data.data = rawscr; data.len = 8; data.flags = MMC_DATA_READ; err = mmc_wait_for_app_cmd(sc->dev, sc->dev, rca, &cmd, CMD_RETRIES); rawscr[0] = be32toh(rawscr[0]); rawscr[1] = be32toh(rawscr[1]); return (err); } static int mmc_app_sd_status(struct mmc_softc *sc, uint16_t rca, uint32_t *rawsdstatus) { struct mmc_command cmd; struct mmc_data data; int err, i; memset(&cmd, 0, sizeof(cmd)); memset(&data, 0, sizeof(data)); memset(rawsdstatus, 0, 64); cmd.opcode = ACMD_SD_STATUS; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; cmd.arg = 0; cmd.data = &data; data.data = rawsdstatus; data.len = 64; data.flags = MMC_DATA_READ; err = mmc_wait_for_app_cmd(sc->dev, sc->dev, rca, &cmd, CMD_RETRIES); for (i = 0; i < 16; i++) rawsdstatus[i] = be32toh(rawsdstatus[i]); return (err); } static int mmc_set_relative_addr(struct mmc_softc *sc, uint16_t resp) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_SET_RELATIVE_ADDR; cmd.arg = resp << 16; cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; cmd.data = NULL; err = mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, CMD_RETRIES); return (err); } static int mmc_send_relative_addr(struct mmc_softc *sc, uint32_t *resp) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = SD_SEND_RELATIVE_ADDR; cmd.arg = 0; cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; cmd.data = NULL; err = mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, CMD_RETRIES); *resp = cmd.resp[0]; return (err); } static int mmc_set_blocklen(struct mmc_softc *sc, uint32_t len) { struct mmc_command cmd; int err; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_SET_BLOCKLEN; cmd.arg = len; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; cmd.data = NULL; err = mmc_wait_for_cmd(sc->dev, sc->dev, &cmd, CMD_RETRIES); return (err); } static uint32_t mmc_timing_to_dtr(struct mmc_ivars *ivar, enum mmc_bus_timing timing) { switch (timing) { case bus_timing_normal: return (ivar->tran_speed); case bus_timing_hs: return (ivar->hs_tran_speed); case bus_timing_uhs_sdr12: return (SD_SDR12_MAX); case bus_timing_uhs_sdr25: return (SD_SDR25_MAX); case bus_timing_uhs_ddr50: return (SD_DDR50_MAX); case bus_timing_uhs_sdr50: return (SD_SDR50_MAX); case bus_timing_uhs_sdr104: return (SD_SDR104_MAX); case bus_timing_mmc_ddr52: return (MMC_TYPE_DDR52_MAX); case bus_timing_mmc_hs200: case bus_timing_mmc_hs400: case 
bus_timing_mmc_hs400es: return (MMC_TYPE_HS200_HS400ES_MAX); } return (0); } static const char * mmc_timing_to_string(enum mmc_bus_timing timing) { switch (timing) { case bus_timing_normal: return ("normal speed"); case bus_timing_hs: return ("high speed"); case bus_timing_uhs_sdr12: case bus_timing_uhs_sdr25: case bus_timing_uhs_sdr50: case bus_timing_uhs_sdr104: return ("single data rate"); case bus_timing_uhs_ddr50: case bus_timing_mmc_ddr52: return ("dual data rate"); case bus_timing_mmc_hs200: return ("HS200"); case bus_timing_mmc_hs400: return ("HS400"); case bus_timing_mmc_hs400es: return ("HS400 with enhanced strobe"); } return (""); } static bool mmc_host_timing(device_t dev, enum mmc_bus_timing timing) { int host_caps; host_caps = mmcbr_get_caps(dev); #define HOST_TIMING_CAP(host_caps, cap) ({ \ bool retval; \ if (((host_caps) & (cap)) == (cap)) \ retval = true; \ else \ retval = false; \ retval; \ }) switch (timing) { case bus_timing_normal: return (true); case bus_timing_hs: return (HOST_TIMING_CAP(host_caps, MMC_CAP_HSPEED)); case bus_timing_uhs_sdr12: return (HOST_TIMING_CAP(host_caps, MMC_CAP_UHS_SDR12)); case bus_timing_uhs_sdr25: return (HOST_TIMING_CAP(host_caps, MMC_CAP_UHS_SDR25)); case bus_timing_uhs_ddr50: return (HOST_TIMING_CAP(host_caps, MMC_CAP_UHS_DDR50)); case bus_timing_uhs_sdr50: return (HOST_TIMING_CAP(host_caps, MMC_CAP_UHS_SDR50)); case bus_timing_uhs_sdr104: return (HOST_TIMING_CAP(host_caps, MMC_CAP_UHS_SDR104)); case bus_timing_mmc_ddr52: return (HOST_TIMING_CAP(host_caps, MMC_CAP_MMC_DDR52)); case bus_timing_mmc_hs200: return (HOST_TIMING_CAP(host_caps, MMC_CAP_MMC_HS200)); case bus_timing_mmc_hs400: return (HOST_TIMING_CAP(host_caps, MMC_CAP_MMC_HS400)); case bus_timing_mmc_hs400es: return (HOST_TIMING_CAP(host_caps, MMC_CAP_MMC_HS400 | MMC_CAP_MMC_ENH_STROBE)); } #undef HOST_TIMING_CAP return (false); } static void mmc_log_card(device_t dev, struct mmc_ivars *ivar, int newcard) { enum mmc_bus_timing timing; device_printf(dev, "Card at relative address 0x%04x%s:\n", ivar->rca, newcard ? " added" : ""); device_printf(dev, " card: %s\n", ivar->card_id_string); for (timing = bus_timing_max; timing > bus_timing_normal; timing--) { if (isset(&ivar->timings, timing)) break; } device_printf(dev, " quirks: %b\n", ivar->quirks, MMC_QUIRKS_FMT); device_printf(dev, " bus: %ubit, %uMHz (%s timing)\n", (ivar->bus_width == bus_width_1 ? 1 : (ivar->bus_width == bus_width_4 ? 4 : 8)), mmc_timing_to_dtr(ivar, timing) / 1000000, mmc_timing_to_string(timing)); device_printf(dev, " memory: %u blocks, erase sector %u blocks%s\n", ivar->sec_count, ivar->erase_sector, ivar->read_only ? ", read-only" : ""); } static void mmc_discover_cards(struct mmc_softc *sc) { u_char switch_res[64]; uint32_t raw_cid[4]; struct mmc_ivars *ivar = NULL; const struct mmc_quirk *quirk; const uint8_t *ext_csd; device_t child; int err, host_caps, i, newcard; uint32_t resp, sec_count, status; uint16_t rca = 2; int16_t rev; uint8_t card_type; host_caps = mmcbr_get_caps(sc->dev); if (bootverbose || mmc_debug) device_printf(sc->dev, "Probing cards\n"); while (1) { child = NULL; sc->squelched++; /* Errors are expected, squelch reporting. 
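A CMD2 (ALL_SEND_CID) timeout is the normal loop-exit condition below: it just means no further card answered this round of the poll, not that the bus failed.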
*/ err = mmc_all_send_cid(sc, raw_cid); sc->squelched--; if (err == MMC_ERR_TIMEOUT) break; if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error reading CID %d\n", err); break; } newcard = 1; for (i = 0; i < sc->child_count; i++) { ivar = device_get_ivars(sc->child_list[i]); if (memcmp(ivar->raw_cid, raw_cid, sizeof(raw_cid)) == 0) { newcard = 0; break; } } if (bootverbose || mmc_debug) { device_printf(sc->dev, "%sard detected (CID %08x%08x%08x%08x)\n", newcard ? "New c" : "C", raw_cid[0], raw_cid[1], raw_cid[2], raw_cid[3]); } if (newcard) { ivar = malloc(sizeof(struct mmc_ivars), M_DEVBUF, M_WAITOK | M_ZERO); memcpy(ivar->raw_cid, raw_cid, sizeof(raw_cid)); } if (mmcbr_get_ro(sc->dev)) ivar->read_only = 1; ivar->bus_width = bus_width_1; setbit(&ivar->timings, bus_timing_normal); ivar->mode = mmcbr_get_mode(sc->dev); if (ivar->mode == mode_sd) { mmc_decode_cid_sd(ivar->raw_cid, &ivar->cid); err = mmc_send_relative_addr(sc, &resp); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error getting RCA %d\n", err); goto free_ivar; } ivar->rca = resp >> 16; /* Get card CSD. */ err = mmc_send_csd(sc, ivar->rca, ivar->raw_csd); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error getting CSD %d\n", err); goto free_ivar; } if (bootverbose || mmc_debug) device_printf(sc->dev, "%sard detected (CSD %08x%08x%08x%08x)\n", newcard ? "New c" : "C", ivar->raw_csd[0], ivar->raw_csd[1], ivar->raw_csd[2], ivar->raw_csd[3]); err = mmc_decode_csd_sd(ivar->raw_csd, &ivar->csd); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error decoding CSD\n"); goto free_ivar; } ivar->sec_count = ivar->csd.capacity / MMC_SECTOR_SIZE; if (ivar->csd.csd_structure > 0) ivar->high_cap = 1; ivar->tran_speed = ivar->csd.tran_speed; ivar->erase_sector = ivar->csd.erase_sector * ivar->csd.write_bl_len / MMC_SECTOR_SIZE; err = mmc_send_status(sc->dev, sc->dev, ivar->rca, &status); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error reading card status %d\n", err); goto free_ivar; } if ((status & R1_CARD_IS_LOCKED) != 0) { device_printf(sc->dev, "Card is password protected, skipping\n"); goto free_ivar; } /* Get card SCR. Card must be selected to fetch it. */ err = mmc_select_card(sc, ivar->rca); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error selecting card %d\n", err); goto free_ivar; } err = mmc_app_send_scr(sc, ivar->rca, ivar->raw_scr); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error reading SCR %d\n", err); goto free_ivar; } mmc_app_decode_scr(ivar->raw_scr, &ivar->scr); /* Get card switch capabilities (command class 10). */ if ((ivar->scr.sda_vsn >= 1) && (ivar->csd.ccc & (1 << 10))) { err = mmc_sd_switch(sc, SD_SWITCH_MODE_CHECK, SD_SWITCH_GROUP1, SD_SWITCH_NOCHANGE, switch_res); if (err == MMC_ERR_NONE && switch_res[13] & (1 << SD_SWITCH_HS_MODE)) { setbit(&ivar->timings, bus_timing_hs); ivar->hs_tran_speed = SD_HS_MAX; } } /* * We deselect then reselect the card here. Some cards * become unselected and timeout with the above two * commands, although the state tables / diagrams in the * standard suggest they go back to the transfer state. * Other cards don't become deselected, and if we * attempt to blindly re-select them, we get timeout * errors from some controllers. So we deselect then * reselect to handle all situations. The only thing we * use from the sd_status is the erase sector size, but * it is still nice to get that right. 
*/ (void)mmc_select_card(sc, 0); (void)mmc_select_card(sc, ivar->rca); (void)mmc_app_sd_status(sc, ivar->rca, ivar->raw_sd_status); mmc_app_decode_sd_status(ivar->raw_sd_status, &ivar->sd_status); if (ivar->sd_status.au_size != 0) { ivar->erase_sector = 16 << ivar->sd_status.au_size; } /* Find maximum supported bus width. */ if ((host_caps & MMC_CAP_4_BIT_DATA) && (ivar->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) ivar->bus_width = bus_width_4; goto child_common; } ivar->rca = rca++; err = mmc_set_relative_addr(sc, ivar->rca); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error setting RCA %d\n", err); goto free_ivar; } /* Get card CSD. */ err = mmc_send_csd(sc, ivar->rca, ivar->raw_csd); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error getting CSD %d\n", err); goto free_ivar; } if (bootverbose || mmc_debug) device_printf(sc->dev, "%sard detected (CSD %08x%08x%08x%08x)\n", newcard ? "New c" : "C", ivar->raw_csd[0], ivar->raw_csd[1], ivar->raw_csd[2], ivar->raw_csd[3]); mmc_decode_csd_mmc(ivar->raw_csd, &ivar->csd); ivar->sec_count = ivar->csd.capacity / MMC_SECTOR_SIZE; ivar->tran_speed = ivar->csd.tran_speed; ivar->erase_sector = ivar->csd.erase_sector * ivar->csd.write_bl_len / MMC_SECTOR_SIZE; err = mmc_send_status(sc->dev, sc->dev, ivar->rca, &status); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error reading card status %d\n", err); goto free_ivar; } if ((status & R1_CARD_IS_LOCKED) != 0) { device_printf(sc->dev, "Card is password protected, skipping\n"); goto free_ivar; } err = mmc_select_card(sc, ivar->rca); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error selecting card %d\n", err); goto free_ivar; } rev = -1; /* Only MMC >= 4.x devices support EXT_CSD. */ if (ivar->csd.spec_vers >= 4) { err = mmc_send_ext_csd(sc->dev, sc->dev, ivar->raw_ext_csd); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error reading EXT_CSD %d\n", err); goto free_ivar; } ext_csd = ivar->raw_ext_csd; rev = ext_csd[EXT_CSD_REV]; /* Handle extended capacity from EXT_CSD */ sec_count = le32dec(&ext_csd[EXT_CSD_SEC_CNT]); if (sec_count != 0) { ivar->sec_count = sec_count; ivar->high_cap = 1; } /* Find maximum supported bus width. */ ivar->bus_width = mmc_test_bus_width(sc); /* Get device speeds beyond normal mode. 
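EXT_CSD_CARD_TYPE advertises every interface timing the device supports; a mode is only recorded below when the host also supports the matching signaling voltage (and, for HS400/HS400ES, when an 8-bit bus was detected).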
*/ card_type = ext_csd[EXT_CSD_CARD_TYPE]; if ((card_type & EXT_CSD_CARD_TYPE_HS_52) != 0) { setbit(&ivar->timings, bus_timing_hs); ivar->hs_tran_speed = MMC_TYPE_HS_52_MAX; } else if ((card_type & EXT_CSD_CARD_TYPE_HS_26) != 0) { setbit(&ivar->timings, bus_timing_hs); ivar->hs_tran_speed = MMC_TYPE_HS_26_MAX; } if ((card_type & EXT_CSD_CARD_TYPE_DDR_52_1_2V) != 0 && (host_caps & MMC_CAP_SIGNALING_120) != 0) { setbit(&ivar->timings, bus_timing_mmc_ddr52); setbit(&ivar->vccq_120, bus_timing_mmc_ddr52); } if ((card_type & EXT_CSD_CARD_TYPE_DDR_52_1_8V) != 0 && (host_caps & MMC_CAP_SIGNALING_180) != 0) { setbit(&ivar->timings, bus_timing_mmc_ddr52); setbit(&ivar->vccq_180, bus_timing_mmc_ddr52); } if ((card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) != 0 && (host_caps & MMC_CAP_SIGNALING_120) != 0) { setbit(&ivar->timings, bus_timing_mmc_hs200); setbit(&ivar->vccq_120, bus_timing_mmc_hs200); } if ((card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) != 0 && (host_caps & MMC_CAP_SIGNALING_180) != 0) { setbit(&ivar->timings, bus_timing_mmc_hs200); setbit(&ivar->vccq_180, bus_timing_mmc_hs200); } if ((card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) != 0 && (host_caps & MMC_CAP_SIGNALING_120) != 0 && ivar->bus_width == bus_width_8) { setbit(&ivar->timings, bus_timing_mmc_hs400); setbit(&ivar->vccq_120, bus_timing_mmc_hs400); } if ((card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) != 0 && (host_caps & MMC_CAP_SIGNALING_180) != 0 && ivar->bus_width == bus_width_8) { setbit(&ivar->timings, bus_timing_mmc_hs400); setbit(&ivar->vccq_180, bus_timing_mmc_hs400); } if ((card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) != 0 && (ext_csd[EXT_CSD_STROBE_SUPPORT] & EXT_CSD_STROBE_SUPPORT_EN) != 0 && (host_caps & MMC_CAP_SIGNALING_120) != 0 && ivar->bus_width == bus_width_8) { setbit(&ivar->timings, bus_timing_mmc_hs400es); setbit(&ivar->vccq_120, bus_timing_mmc_hs400es); } if ((card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) != 0 && (ext_csd[EXT_CSD_STROBE_SUPPORT] & EXT_CSD_STROBE_SUPPORT_EN) != 0 && (host_caps & MMC_CAP_SIGNALING_180) != 0 && ivar->bus_width == bus_width_8) { setbit(&ivar->timings, bus_timing_mmc_hs400es); setbit(&ivar->vccq_180, bus_timing_mmc_hs400es); } /* * Determine generic switch timeout (provided in * units of 10 ms), defaulting to 500 ms. */ ivar->cmd6_time = 500 * 1000; if (rev >= 6) ivar->cmd6_time = 10 * ext_csd[EXT_CSD_GEN_CMD6_TIME]; /* Handle HC erase sector size. */ if (ext_csd[EXT_CSD_ERASE_GRP_SIZE] != 0) { ivar->erase_sector = 1024 * ext_csd[EXT_CSD_ERASE_GRP_SIZE]; err = mmc_switch(sc->dev, sc->dev, ivar->rca, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_ERASE_GRP_DEF, EXT_CSD_ERASE_GRP_DEF_EN, ivar->cmd6_time, true); if (err != MMC_ERR_NONE) { device_printf(sc->dev, "Error setting erase group %d\n", err); goto free_ivar; } } } mmc_decode_cid_mmc(ivar->raw_cid, &ivar->cid, rev >= 5); child_common: for (quirk = &mmc_quirks[0]; quirk->mid != 0x0; quirk++) { if ((quirk->mid == MMC_QUIRK_MID_ANY || quirk->mid == ivar->cid.mid) && (quirk->oid == MMC_QUIRK_OID_ANY || quirk->oid == ivar->cid.oid) && strncmp(quirk->pnm, ivar->cid.pnm, sizeof(ivar->cid.pnm)) == 0) { ivar->quirks = quirk->quirks; break; } } /* * Some cards that report maximum I/O block sizes greater * than 512 require the block length to be set to 512, even * though that is supposed to be the default. 
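CMD16 (SET_BLOCKLEN) is therefore issued below to pin the transfer size to the 512-byte sectors that mmcsd(4) expects.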
Example: * * Transcend 2GB SDSC card, CID: * mid=0x1b oid=0x534d pnm="00000" prv=1.0 mdt=00.2000 */ if (ivar->csd.read_bl_len != MMC_SECTOR_SIZE || ivar->csd.write_bl_len != MMC_SECTOR_SIZE) mmc_set_blocklen(sc, MMC_SECTOR_SIZE); mmc_format_card_id_string(ivar); if (bootverbose || mmc_debug) mmc_log_card(sc->dev, ivar, newcard); if (newcard) { /* Add device. */ child = device_add_child(sc->dev, NULL, -1); if (child != NULL) { device_set_ivars(child, ivar); sc->child_list = realloc(sc->child_list, sizeof(device_t) * (sc->child_count + 1), M_DEVBUF, M_WAITOK); sc->child_list[sc->child_count++] = child; } else device_printf(sc->dev, "Error adding child\n"); } free_ivar: if (newcard && child == NULL) free(ivar, M_DEVBUF); (void)mmc_select_card(sc, 0); /* * Not returning here when one MMC device could not be added * would potentially mean looping forever when that device * is broken (in which case it also may impact the remainder * of the bus anyway, though). */ if ((newcard && child == NULL) || mmcbr_get_mode(sc->dev) == mode_sd) return; } } static void mmc_update_child_list(struct mmc_softc *sc) { device_t child; int i, j; if (sc->child_count == 0) { free(sc->child_list, M_DEVBUF); return; } for (i = j = 0; i < sc->child_count; i++) { for (;;) { child = sc->child_list[j++]; if (child != NULL) break; } if (i != j) sc->child_list[i] = child; } sc->child_list = realloc(sc->child_list, sizeof(device_t) * sc->child_count, M_DEVBUF, M_WAITOK); } static void mmc_rescan_cards(struct mmc_softc *sc) { struct mmc_ivars *ivar; int err, i, j; for (i = j = 0; i < sc->child_count; i++) { ivar = device_get_ivars(sc->child_list[i]); if (mmc_select_card(sc, ivar->rca) != MMC_ERR_NONE) { if (bootverbose || mmc_debug) device_printf(sc->dev, "Card at relative address %d lost\n", ivar->rca); err = device_delete_child(sc->dev, sc->child_list[i]); if (err != 0) { j++; continue; } free(ivar, M_DEVBUF); } else j++; } if (sc->child_count == j) goto out; sc->child_count = j; mmc_update_child_list(sc); out: (void)mmc_select_card(sc, 0); } static int mmc_delete_cards(struct mmc_softc *sc, bool final) { struct mmc_ivars *ivar; int err, i, j; err = 0; for (i = j = 0; i < sc->child_count; i++) { ivar = device_get_ivars(sc->child_list[i]); if (bootverbose || mmc_debug) device_printf(sc->dev, "Card at relative address %d deleted\n", ivar->rca); err = device_delete_child(sc->dev, sc->child_list[i]); if (err != 0) { j++; if (final == false) continue; else break; } free(ivar, M_DEVBUF); } sc->child_count = j; mmc_update_child_list(sc); return (err); } static void mmc_go_discovery(struct mmc_softc *sc) { uint32_t ocr; device_t dev; int err; dev = sc->dev; if (mmcbr_get_power_mode(dev) != power_on) { /* * First, try SD modes */ sc->squelched++; /* Errors are expected, squelch reporting. */ mmcbr_set_mode(dev, mode_sd); mmc_power_up(sc); mmcbr_set_bus_mode(dev, pushpull); if (bootverbose || mmc_debug) device_printf(sc->dev, "Probing bus\n"); mmc_idle_cards(sc); err = mmc_send_if_cond(sc, 1); if ((bootverbose || mmc_debug) && err == 0) device_printf(sc->dev, "SD 2.0 interface conditions: OK\n"); if (mmc_send_app_op_cond(sc, 0, &ocr) != MMC_ERR_NONE) { if (bootverbose || mmc_debug) device_printf(sc->dev, "SD probe: failed\n"); /* * Failed, try MMC */ mmcbr_set_mode(dev, mode_mmc); if (mmc_send_op_cond(sc, 0, &ocr) != MMC_ERR_NONE) { if (bootverbose || mmc_debug) device_printf(sc->dev, "MMC probe: failed\n"); ocr = 0; /* Failed both, powerdown. 
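The zeroed OCR propagates through mmc_select_vdd() below, steering us into the no-compatible-cards path that powers the bus back down.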
*/ } else if (bootverbose || mmc_debug) device_printf(sc->dev, "MMC probe: OK (OCR: 0x%08x)\n", ocr); } else if (bootverbose || mmc_debug) device_printf(sc->dev, "SD probe: OK (OCR: 0x%08x)\n", ocr); sc->squelched--; mmcbr_set_ocr(dev, mmc_select_vdd(sc, ocr)); if (mmcbr_get_ocr(dev) != 0) mmc_idle_cards(sc); } else { mmcbr_set_bus_mode(dev, opendrain); mmcbr_set_clock(dev, SD_MMC_CARD_ID_FREQUENCY); mmcbr_update_ios(dev); /* XXX recompute vdd based on new cards? */ } /* * Make sure that we have a mutually agreeable voltage to at least * one card on the bus. */ if (bootverbose || mmc_debug) device_printf(sc->dev, "Current OCR: 0x%08x\n", mmcbr_get_ocr(dev)); if (mmcbr_get_ocr(dev) == 0) { device_printf(sc->dev, "No compatible cards found on bus\n"); (void)mmc_delete_cards(sc, false); mmc_power_down(sc); return; } /* * Reselect the cards after we've idled them above. */ if (mmcbr_get_mode(dev) == mode_sd) { err = mmc_send_if_cond(sc, 1); mmc_send_app_op_cond(sc, (err ? 0 : MMC_OCR_CCS) | mmcbr_get_ocr(dev), NULL); } else mmc_send_op_cond(sc, MMC_OCR_CCS | mmcbr_get_ocr(dev), NULL); mmc_discover_cards(sc); mmc_rescan_cards(sc); mmcbr_set_bus_mode(dev, pushpull); mmcbr_update_ios(dev); mmc_calculate_clock(sc); } static int mmc_calculate_clock(struct mmc_softc *sc) { device_t dev; struct mmc_ivars *ivar; int i; uint32_t dtr, max_dtr; uint16_t rca; enum mmc_bus_timing max_timing, timing; bool changed, hs400; dev = sc->dev; max_dtr = mmcbr_get_f_max(dev); max_timing = bus_timing_max; do { changed = false; for (i = 0; i < sc->child_count; i++) { ivar = device_get_ivars(sc->child_list[i]); if (isclr(&ivar->timings, max_timing) || !mmc_host_timing(dev, max_timing)) { for (timing = max_timing - 1; timing >= bus_timing_normal; timing--) { if (isset(&ivar->timings, timing) && mmc_host_timing(dev, timing)) { max_timing = timing; break; } } changed = true; } dtr = mmc_timing_to_dtr(ivar, max_timing); if (dtr < max_dtr) { max_dtr = dtr; changed = true; } } } while (changed == true); if (bootverbose || mmc_debug) { device_printf(dev, "setting transfer rate to %d.%03dMHz (%s timing)\n", max_dtr / 1000000, (max_dtr / 1000) % 1000, mmc_timing_to_string(max_timing)); } /* * HS400 must be tuned in HS200 mode, so in case of HS400 we begin * with HS200 following the sequence as described in "6.6.2.2 HS200 * timing mode selection" of the eMMC specification v5.1, too, and * switch to max_timing later. HS400ES requires no tuning and, thus, * can be switched to directly, but requires the same detour via high * speed mode as does HS400 (see mmc_switch_to_hs400()). */ hs400 = max_timing == bus_timing_mmc_hs400; timing = hs400 == true ? bus_timing_mmc_hs200 : max_timing; for (i = 0; i < sc->child_count; i++) { ivar = device_get_ivars(sc->child_list[i]); if ((ivar->timings & ~(1 << bus_timing_normal)) == 0) goto clock; rca = ivar->rca; if (mmc_select_card(sc, rca) != MMC_ERR_NONE) { device_printf(dev, "Card at relative address %d " "failed to select\n", rca); continue; } if (timing == bus_timing_mmc_hs200 || /* includes HS400 */ timing == bus_timing_mmc_hs400es) { if (mmc_set_vccq(sc, ivar, timing) != MMC_ERR_NONE) { device_printf(dev, "Failed to set VCCQ for " "card at relative address %d\n", rca); continue; } } if (timing == bus_timing_mmc_hs200) { /* includes HS400 */ /* Set bus width (required for initial tuning). 
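The tuning blocks used by HS200/HS400 are transferred across the full bus, so both the card (via CMD6) and the host must already be at the final width when mmcbr_tune() runs.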
*/ if (mmc_set_card_bus_width(sc, ivar, timing) != MMC_ERR_NONE) { device_printf(dev, "Card at relative address " "%d failed to set bus width\n", rca); continue; } mmcbr_set_bus_width(dev, ivar->bus_width); mmcbr_update_ios(dev); } else if (timing == bus_timing_mmc_hs400es) { if (mmc_switch_to_hs400(sc, ivar, max_dtr, timing) != MMC_ERR_NONE) { device_printf(dev, "Card at relative address " "%d failed to set %s timing\n", rca, mmc_timing_to_string(timing)); continue; } goto power_class; } if (mmc_set_timing(sc, ivar, timing) != MMC_ERR_NONE) { device_printf(dev, "Card at relative address %d " "failed to set %s timing\n", rca, mmc_timing_to_string(timing)); continue; } if (timing == bus_timing_mmc_ddr52) { /* * Set EXT_CSD_BUS_WIDTH_n_DDR in EXT_CSD_BUS_WIDTH * (must be done after switching to EXT_CSD_HS_TIMING). */ if (mmc_set_card_bus_width(sc, ivar, timing) != MMC_ERR_NONE) { device_printf(dev, "Card at relative address " "%d failed to set bus width\n", rca); continue; } mmcbr_set_bus_width(dev, ivar->bus_width); mmcbr_update_ios(dev); if (mmc_set_vccq(sc, ivar, timing) != MMC_ERR_NONE) { device_printf(dev, "Failed to set VCCQ for " "card at relative address %d\n", rca); continue; } } clock: /* Set clock (must be done before initial tuning). */ mmcbr_set_clock(dev, max_dtr); mmcbr_update_ios(dev); if (mmcbr_tune(dev, hs400) != 0) { device_printf(dev, "Card at relative address %d " "failed to execute initial tuning\n", rca); continue; } if (hs400 == true && mmc_switch_to_hs400(sc, ivar, max_dtr, max_timing) != MMC_ERR_NONE) { device_printf(dev, "Card at relative address %d " "failed to set %s timing\n", rca, mmc_timing_to_string(max_timing)); continue; } power_class: if (mmc_set_power_class(sc, ivar) != MMC_ERR_NONE) { device_printf(dev, "Card at relative address %d " "failed to set power class\n", rca); } } (void)mmc_select_card(sc, 0); return (max_dtr); } /* * Switch from HS200 to HS400 (either initially or for re-tuning) or directly * to HS400ES. This follows the sequences described in "6.6.2.3 HS400 timing * mode selection" of the eMMC specification v5.1. */ static int mmc_switch_to_hs400(struct mmc_softc *sc, struct mmc_ivars *ivar, uint32_t clock, enum mmc_bus_timing max_timing) { device_t dev; int err; uint16_t rca; dev = sc->dev; rca = ivar->rca; /* * Both clock and timing must be set as appropriate for high speed * before eventually switching to HS400/HS400ES; mmc_set_timing() * will issue mmcbr_update_ios(). */ mmcbr_set_clock(dev, ivar->hs_tran_speed); err = mmc_set_timing(sc, ivar, bus_timing_hs); if (err != MMC_ERR_NONE) return (err); /* * Set EXT_CSD_BUS_WIDTH_8_DDR in EXT_CSD_BUS_WIDTH (and additionally * EXT_CSD_BUS_WIDTH_ES for HS400ES). */ err = mmc_set_card_bus_width(sc, ivar, max_timing); if (err != MMC_ERR_NONE) return (err); mmcbr_set_bus_width(dev, ivar->bus_width); mmcbr_update_ios(dev); /* Finally, switch to HS400/HS400ES mode. */ err = mmc_set_timing(sc, ivar, max_timing); if (err != MMC_ERR_NONE) return (err); mmcbr_set_clock(dev, clock); mmcbr_update_ios(dev); return (MMC_ERR_NONE); } /* * Switch from HS400 to HS200 (for re-tuning). */ static int mmc_switch_to_hs200(struct mmc_softc *sc, struct mmc_ivars *ivar, uint32_t clock) { device_t dev; int err; uint16_t rca; dev = sc->dev; rca = ivar->rca; /* * Both clock and timing must initially be set as appropriate for * DDR52 before eventually switching to HS200; mmc_set_timing() * will issue mmcbr_update_ios(). 
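In effect this reverses mmc_switch_to_hs400(): drop to DDR52 at the high-speed clock, clear the DDR bus-width bits while falling back to plain high speed, then select HS200 and restore the target clock.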
*/ mmcbr_set_clock(dev, ivar->hs_tran_speed); err = mmc_set_timing(sc, ivar, bus_timing_mmc_ddr52); if (err != MMC_ERR_NONE) return (err); /* * Next, switch to high speed. Thus, clear EXT_CSD_BUS_WIDTH_n_DDR * in EXT_CSD_BUS_WIDTH and update bus width and timing in ios. */ err = mmc_set_card_bus_width(sc, ivar, bus_timing_hs); if (err != MMC_ERR_NONE) return (err); mmcbr_set_bus_width(dev, ivar->bus_width); mmcbr_set_timing(sc->dev, bus_timing_hs); mmcbr_update_ios(dev); /* Finally, switch to HS200 mode. */ err = mmc_set_timing(sc, ivar, bus_timing_mmc_hs200); if (err != MMC_ERR_NONE) return (err); mmcbr_set_clock(dev, clock); mmcbr_update_ios(dev); return (MMC_ERR_NONE); } static int mmc_retune(device_t busdev, device_t dev, bool reset) { struct mmc_softc *sc; struct mmc_ivars *ivar; int err; uint32_t clock; enum mmc_bus_timing timing; if (device_get_parent(dev) != busdev) return (MMC_ERR_INVALID); sc = device_get_softc(busdev); if (sc->retune_needed != 1 && sc->retune_paused != 0) return (MMC_ERR_INVALID); timing = mmcbr_get_timing(busdev); if (timing == bus_timing_mmc_hs400) { /* * Controllers use the data strobe line to latch data from * the devices in HS400 mode so periodic re-tuning isn't * expected to be required, i. e. only if a CRC or tuning * error is signaled to the bridge. In these latter cases * we are asked to reset the tuning circuit and need to do * the switch timing dance. */ if (reset == false) return (0); ivar = device_get_ivars(dev); clock = mmcbr_get_clock(busdev); if (mmc_switch_to_hs200(sc, ivar, clock) != MMC_ERR_NONE) return (MMC_ERR_BADCRC); } err = mmcbr_retune(busdev, reset); if (err != 0 && timing == bus_timing_mmc_hs400) return (MMC_ERR_BADCRC); switch (err) { case 0: break; case EIO: return (MMC_ERR_FAILED); default: return (MMC_ERR_INVALID); } if (timing == bus_timing_mmc_hs400) { if (mmc_switch_to_hs400(sc, ivar, clock, timing) != MMC_ERR_NONE) return (MMC_ERR_BADCRC); } return (MMC_ERR_NONE); } static void mmc_retune_pause(device_t busdev, device_t dev, bool retune) { struct mmc_softc *sc; sc = device_get_softc(busdev); KASSERT(device_get_parent(dev) == busdev, ("%s: %s is not a child of %s", __func__, device_get_nameunit(dev), device_get_nameunit(busdev))); KASSERT(sc->owner != NULL, ("%s: Request from %s without bus being acquired.", __func__, device_get_nameunit(dev))); if (retune == true && sc->retune_paused == 0) sc->retune_needed = 1; sc->retune_paused++; } static void mmc_retune_unpause(device_t busdev, device_t dev) { struct mmc_softc *sc; sc = device_get_softc(busdev); KASSERT(device_get_parent(dev) == busdev, ("%s: %s is not a child of %s", __func__, device_get_nameunit(dev), device_get_nameunit(busdev))); KASSERT(sc->owner != NULL, ("%s: Request from %s without bus being acquired.", __func__, device_get_nameunit(dev))); KASSERT(sc->retune_paused != 0, ("%s: Re-tune pause count already at 0", __func__)); sc->retune_paused--; } static void mmc_scan(struct mmc_softc *sc) { device_t dev = sc->dev; int err; err = mmc_acquire_bus(dev, dev); if (err != 0) { device_printf(dev, "Failed to acquire bus for scanning\n"); return; } mmc_go_discovery(sc); err = mmc_release_bus(dev, dev); if (err != 0) { device_printf(dev, "Failed to release bus after scanning\n"); return; } (void)bus_generic_attach(dev); } static int mmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct mmc_ivars *ivar = device_get_ivars(child); switch (which) { default: return (EINVAL); case MMC_IVAR_SPEC_VERS: *result = ivar->csd.spec_vers; break; case 
MMC_IVAR_DSR_IMP: *result = ivar->csd.dsr_imp; break; case MMC_IVAR_MEDIA_SIZE: *result = ivar->sec_count; break; case MMC_IVAR_RCA: *result = ivar->rca; break; case MMC_IVAR_SECTOR_SIZE: *result = MMC_SECTOR_SIZE; break; case MMC_IVAR_TRAN_SPEED: *result = mmcbr_get_clock(bus); break; case MMC_IVAR_READ_ONLY: *result = ivar->read_only; break; case MMC_IVAR_HIGH_CAP: *result = ivar->high_cap; break; case MMC_IVAR_CARD_TYPE: *result = ivar->mode; break; case MMC_IVAR_BUS_WIDTH: *result = ivar->bus_width; break; case MMC_IVAR_ERASE_SECTOR: *result = ivar->erase_sector; break; case MMC_IVAR_MAX_DATA: *result = mmcbr_get_max_data(bus); break; case MMC_IVAR_CMD6_TIMEOUT: *result = ivar->cmd6_time; break; case MMC_IVAR_QUIRKS: *result = ivar->quirks; break; case MMC_IVAR_CARD_ID_STRING: *(char **)result = ivar->card_id_string; break; case MMC_IVAR_CARD_SN_STRING: *(char **)result = ivar->card_sn_string; break; } return (0); } static int mmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { /* * None are writable ATM */ return (EINVAL); } static void mmc_delayed_attach(void *xsc) { struct mmc_softc *sc = xsc; mmc_scan(sc); config_intrhook_disestablish(&sc->config_intrhook); } static int -mmc_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) +mmc_child_location(device_t dev, device_t child, struct sbuf *sb) { - snprintf(buf, buflen, "rca=0x%04x", mmc_get_rca(child)); + sbuf_printf(sb, "rca=0x%04x", mmc_get_rca(child)); return (0); } static device_method_t mmc_methods[] = { /* device_if */ DEVMETHOD(device_probe, mmc_probe), DEVMETHOD(device_attach, mmc_attach), DEVMETHOD(device_detach, mmc_detach), DEVMETHOD(device_suspend, mmc_suspend), DEVMETHOD(device_resume, mmc_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, mmc_read_ivar), DEVMETHOD(bus_write_ivar, mmc_write_ivar), - DEVMETHOD(bus_child_location_str, mmc_child_location_str), + DEVMETHOD(bus_child_location, mmc_child_location), /* MMC Bus interface */ DEVMETHOD(mmcbus_retune_pause, mmc_retune_pause), DEVMETHOD(mmcbus_retune_unpause, mmc_retune_unpause), DEVMETHOD(mmcbus_wait_for_request, mmc_wait_for_request), DEVMETHOD(mmcbus_acquire_bus, mmc_acquire_bus), DEVMETHOD(mmcbus_release_bus, mmc_release_bus), DEVMETHOD_END }; driver_t mmc_driver = { "mmc", mmc_methods, sizeof(struct mmc_softc), }; devclass_t mmc_devclass; MODULE_VERSION(mmc, MMC_VERSION); diff --git a/sys/dev/mvs/mvs_pci.c b/sys/dev/mvs/mvs_pci.c index 7e9c4111ca60..5f96fdf74fdc 100644 --- a/sys/dev/mvs/mvs_pci.c +++ b/sys/dev/mvs/mvs_pci.c @@ -1,527 +1,527 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include "mvs.h" /* local prototypes */ static int mvs_setup_interrupt(device_t dev); static void mvs_intr(void *data); static int mvs_suspend(device_t dev); static int mvs_resume(device_t dev); static int mvs_ctlr_setup(device_t dev); static struct { uint32_t id; uint8_t rev; const char *name; int ports; int quirks; } mvs_ids[] = { {0x504011ab, 0x00, "Marvell 88SX5040", 4, MVS_Q_GENI}, {0x504111ab, 0x00, "Marvell 88SX5041", 4, MVS_Q_GENI}, {0x508011ab, 0x00, "Marvell 88SX5080", 8, MVS_Q_GENI}, {0x508111ab, 0x00, "Marvell 88SX5081", 8, MVS_Q_GENI}, {0x604011ab, 0x00, "Marvell 88SX6040", 4, MVS_Q_GENII}, {0x604111ab, 0x00, "Marvell 88SX6041", 4, MVS_Q_GENII}, {0x604211ab, 0x00, "Marvell 88SX6042", 4, MVS_Q_GENIIE}, {0x608011ab, 0x00, "Marvell 88SX6080", 8, MVS_Q_GENII}, {0x608111ab, 0x00, "Marvell 88SX6081", 8, MVS_Q_GENII}, {0x704211ab, 0x00, "Marvell 88SX7042", 4, MVS_Q_GENIIE|MVS_Q_CT}, {0x02419005, 0x00, "Adaptec 1420SA", 4, MVS_Q_GENII}, {0x02439005, 0x00, "Adaptec 1430SA", 4, MVS_Q_GENIIE|MVS_Q_CT}, {0x00000000, 0x00, NULL, 0, 0} }; static int mvs_probe(device_t dev) { char buf[64]; int i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); for (i = 0; mvs_ids[i].id != 0; i++) { if (mvs_ids[i].id == devid && mvs_ids[i].rev <= revid) { snprintf(buf, sizeof(buf), "%s SATA controller", mvs_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int mvs_attach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); device_t child; int error, unit, i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); ctlr->dev = dev; i = 0; while (mvs_ids[i].id != 0 && (mvs_ids[i].id != devid || mvs_ids[i].rev > revid)) i++; ctlr->channels = mvs_ids[i].ports; ctlr->quirks = mvs_ids[i].quirks; ctlr->ccc = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "ccc", &ctlr->ccc); ctlr->cccc = 8; resource_int_value(device_get_name(dev), device_get_unit(dev), "cccc", &ctlr->cccc); if (ctlr->ccc == 0 || ctlr->cccc == 0) { ctlr->ccc = 0; ctlr->cccc = 0; } if (ctlr->ccc > 100000) ctlr->ccc = 100000; device_printf(dev, "Gen-%s, %d %sGbps ports, Port Multiplier %s%s\n", ((ctlr->quirks & MVS_Q_GENI) ? "I" : ((ctlr->quirks & MVS_Q_GENII) ? "II" : "IIe")), ctlr->channels, ((ctlr->quirks & MVS_Q_GENI) ? "1.5" : "3"), ((ctlr->quirks & MVS_Q_GENI) ? "not supported" : "supported"), ((ctlr->quirks & MVS_Q_GENIIE) ? " with FBS" : "")); mtx_init(&ctlr->mtx, "MVS controller lock", NULL, MTX_DEF); /* We should have a memory BAR(0). */ ctlr->r_rid = PCIR_BAR(0); if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE))) return ENXIO; /* Setup our own memory management for channels. 
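mvs_alloc_resource() later carves each channel's register window out of this rman region via bus_space_subregion(), so the children never map the PCI BAR themselves.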
*/ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return (error); } pci_enable_busmaster(dev); mvs_ctlr_setup(dev); /* Setup interrupts. */ if (mvs_setup_interrupt(dev)) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return ENXIO; } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "mvsch", -1); if (child == NULL) device_printf(dev, "failed to add channel device\n"); else device_set_ivars(child, (void *)(intptr_t)unit); } bus_generic_attach(dev); return 0; } static int mvs_detach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); /* Detach & delete all children */ device_delete_children(dev); /* Free interrupt. */ if (ctlr->irq.r_irq) { bus_teardown_intr(dev, ctlr->irq.r_irq, ctlr->irq.handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); } pci_release_msi(dev); /* Free memory. */ rman_fini(&ctlr->sc_iomem); if (ctlr->r_mem) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); mtx_destroy(&ctlr->mtx); return (0); } static int mvs_ctlr_setup(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); int i, ccc = ctlr->ccc, cccc = ctlr->cccc, ccim = 0; /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_MIM, 0x00000000); /* Mask PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIM, 0x00000000); /* Clear PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIC, 0x00000000); if (ccc && bootverbose) { device_printf(dev, "CCC with %dus/%dcmd enabled\n", ctlr->ccc, ctlr->cccc); } ccc *= 150; /* Configure chip-global CCC */ if (ctlr->channels > 4 && (ctlr->quirks & MVS_Q_GENI) == 0) { ATA_OUTL(ctlr->r_mem, CHIP_ICT, cccc); ATA_OUTL(ctlr->r_mem, CHIP_ITT, ccc); ATA_OUTL(ctlr->r_mem, CHIP_ICC, ~CHIP_ICC_ALL_PORTS); if (ccc) ccim |= IC_ALL_PORTS_COAL_DONE; ccc = 0; cccc = 0; } for (i = 0; i < ctlr->channels / 4; i++) { /* Configure per-HC CCC */ ATA_OUTL(ctlr->r_mem, HC_BASE(i) + HC_ICT, cccc); ATA_OUTL(ctlr->r_mem, HC_BASE(i) + HC_ITT, ccc); if (ccc) ccim |= (IC_HC0_COAL_DONE << (i * IC_HC_SHIFT)); /* Clear HC interrupts */ ATA_OUTL(ctlr->r_mem, HC_BASE(i) + HC_IC, 0x00000000); } /* Enable chip interrupts */ ctlr->gmim = (ccim ? ccim : (IC_DONE_HC0 | IC_DONE_HC1)) | IC_ERR_HC0 | IC_ERR_HC1; ctlr->mim = ctlr->gmim | ctlr->pmim; ATA_OUTL(ctlr->r_mem, CHIP_MIM, ctlr->mim); /* Enable PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIM, 0x007fffff); return (0); } static void mvs_edma(device_t dev, device_t child, int mode) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; int bit = IC_DONE_IRQ << (unit * 2 + unit / 4) ; if (ctlr->ccc == 0) return; /* CCC is not working for non-EDMA mode. Unmask device interrupts. 
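The per-port done-IRQ bit is folded into pmim and merged with gmim; the CHIP_MIM write is skipped while an MSI is being serviced (msia set), in which case mvs_intr() writes the updated mask back on exit.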
*/ mtx_lock(&ctlr->mtx); if (mode == MVS_EDMA_OFF) ctlr->pmim |= bit; else ctlr->pmim &= ~bit; ctlr->mim = ctlr->gmim | ctlr->pmim; if (!ctlr->msia) ATA_OUTL(ctlr->r_mem, CHIP_MIM, ctlr->mim); mtx_unlock(&ctlr->mtx); } static int mvs_suspend(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); bus_generic_suspend(dev); /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_MIM, 0x00000000); /* Mask PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIM, 0x00000000); return 0; } static int mvs_resume(device_t dev) { mvs_ctlr_setup(dev); return (bus_generic_resume(dev)); } static int mvs_setup_interrupt(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); int msi = 0; /* Process hints. */ resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &msi); if (msi < 0) msi = 0; else if (msi > 0) msi = min(1, pci_msi_count(dev)); /* Allocate MSI if needed/present. */ if (msi && pci_alloc_msi(dev, &msi) != 0) msi = 0; ctlr->msi = msi; /* Allocate all IRQs. */ ctlr->irq.r_irq_rid = msi ? 1 : 0; if (!(ctlr->irq.r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irq.r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return (ENXIO); } if ((bus_setup_intr(dev, ctlr->irq.r_irq, ATA_INTR_FLAGS, NULL, mvs_intr, ctlr, &ctlr->irq.handle))) { device_printf(dev, "unable to setup interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); ctlr->irq.r_irq = NULL; return (ENXIO); } return (0); } /* * Common case interrupt handler. */ static void mvs_intr(void *data) { struct mvs_controller *ctlr = data; struct mvs_intr_arg arg; void (*function)(void *); int p; u_int32_t ic, aic; ic = ATA_INL(ctlr->r_mem, CHIP_MIC); if (ctlr->msi) { /* We have to mask MSI during processing. */ mtx_lock(&ctlr->mtx); ATA_OUTL(ctlr->r_mem, CHIP_MIM, 0); ctlr->msia = 1; /* Deny MIM update during processing. */ mtx_unlock(&ctlr->mtx); } else if (ic == 0) return; /* Acknowledge all-ports CCC interrupt. */ if (ic & IC_ALL_PORTS_COAL_DONE) ATA_OUTL(ctlr->r_mem, CHIP_ICC, ~CHIP_ICC_ALL_PORTS); for (p = 0; p < ctlr->channels; p++) { if ((p & 3) == 0) { if (p != 0) ic >>= 1; if ((ic & IC_HC0) == 0) { p += 3; ic >>= 8; continue; } /* Acknowledge interrupts of this HC. */ aic = 0; if (ic & (IC_DONE_IRQ << 0)) aic |= HC_IC_DONE(0) | HC_IC_DEV(0); if (ic & (IC_DONE_IRQ << 2)) aic |= HC_IC_DONE(1) | HC_IC_DEV(1); if (ic & (IC_DONE_IRQ << 4)) aic |= HC_IC_DONE(2) | HC_IC_DEV(2); if (ic & (IC_DONE_IRQ << 6)) aic |= HC_IC_DONE(3) | HC_IC_DEV(3); if (ic & IC_HC0_COAL_DONE) aic |= HC_IC_COAL; ATA_OUTL(ctlr->r_mem, HC_BASE(p == 4) + HC_IC, ~aic); } /* Call per-port interrupt handler. */ arg.cause = ic & (IC_ERR_IRQ|IC_DONE_IRQ); if ((arg.cause != 0) && (function = ctlr->interrupt[p].function)) { arg.arg = ctlr->interrupt[p].argument; function(&arg); } ic >>= 2; } if (ctlr->msi) { /* Unmasking MSI triggers next interrupt, if needed. */ mtx_lock(&ctlr->mtx); ctlr->msia = 0; /* Allow MIM update. 
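This write also flushes any mask bits mvs_edma() changed while msia was set.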
*/ ATA_OUTL(ctlr->r_mem, CHIP_MIM, ctlr->mim); mtx_unlock(&ctlr->mtx); } } static struct resource * mvs_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; struct resource *res = NULL; int offset = HC_BASE(unit >> 2) + PORT_BASE(unit & 0x03); rman_res_t st; switch (type) { case SYS_RES_MEMORY: st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + PORT_SIZE - 1, PORT_SIZE, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, PORT_SIZE, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irq.r_irq; break; } return (res); } static int mvs_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { switch (type) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: if (rid != ATA_IRQ_RID) return ENOENT; return (0); } return (EINVAL); } static int mvs_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); if (filter != NULL) { printf("mvs.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } static int mvs_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } static int mvs_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at channel %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } static int -mvs_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) +mvs_child_location(device_t dev, device_t child, struct sbuf *sb) { - snprintf(buf, buflen, "channel=%d", + sbuf_printf(sb, "channel=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static bus_dma_tag_t mvs_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } static device_method_t mvs_methods[] = { DEVMETHOD(device_probe, mvs_probe), DEVMETHOD(device_attach, mvs_attach), DEVMETHOD(device_detach, mvs_detach), DEVMETHOD(device_suspend, mvs_suspend), DEVMETHOD(device_resume, mvs_resume), DEVMETHOD(bus_print_child, mvs_print_child), DEVMETHOD(bus_alloc_resource, mvs_alloc_resource), DEVMETHOD(bus_release_resource, mvs_release_resource), DEVMETHOD(bus_setup_intr, mvs_setup_intr), DEVMETHOD(bus_teardown_intr,mvs_teardown_intr), - DEVMETHOD(bus_child_location_str, mvs_child_location_str), + DEVMETHOD(bus_child_location, mvs_child_location), DEVMETHOD(bus_get_dma_tag, mvs_get_dma_tag), DEVMETHOD(mvs_edma, mvs_edma), { 0, 0 } }; static driver_t mvs_driver = { "mvs", mvs_methods, sizeof(struct mvs_controller) }; DRIVER_MODULE(mvs, pci, mvs_driver, mvs_devclass, 0, 0); MODULE_PNP_INFO("W32:vendor/device", pci, mvs, mvs_ids, nitems(mvs_ids) - 1); 
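/* The W32:vendor/device PNP table exported above lets devmatch(8) autoload the driver; nitems(mvs_ids) - 1 excludes the all-zero sentinel entry. */ 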
MODULE_VERSION(mvs, 1); MODULE_DEPEND(mvs, cam, 1, 1, 1); diff --git a/sys/dev/mvs/mvs_soc.c b/sys/dev/mvs/mvs_soc.c index 6243c2ce1951..18b78602ab62 100644 --- a/sys/dev/mvs/mvs_soc.c +++ b/sys/dev/mvs/mvs_soc.c @@ -1,471 +1,470 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include "mvs.h" /* local prototypes */ static int mvs_setup_interrupt(device_t dev); static void mvs_intr(void *data); static int mvs_suspend(device_t dev); static int mvs_resume(device_t dev); static int mvs_ctlr_setup(device_t dev); static struct { uint32_t id; uint8_t rev; const char *name; int ports; int quirks; } mvs_ids[] = { {MV_DEV_88F5182, 0x00, "Marvell 88F5182", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_88F6281, 0x00, "Marvell 88F6281", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_88F6282, 0x00, "Marvell 88F6282", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_MV78100, 0x00, "Marvell MV78100", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_MV78100_Z0, 0x00,"Marvell MV78100", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_MV78260, 0x00, "Marvell MV78260", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_MV78460, 0x00, "Marvell MV78460", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {0, 0x00, NULL, 0, 0} }; static int mvs_probe(device_t dev) { char buf[64]; int i; uint32_t devid, revid; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "mrvl,sata")) return (ENXIO); soc_id(&devid, &revid); for (i = 0; mvs_ids[i].id != 0; i++) { if (mvs_ids[i].id == devid && mvs_ids[i].rev <= revid) { snprintf(buf, sizeof(buf), "%s SATA controller", mvs_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int mvs_attach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); device_t child; int error, unit, i; uint32_t devid, revid; soc_id(&devid, &revid); ctlr->dev = dev; i = 0; while (mvs_ids[i].id != 0 && (mvs_ids[i].id != devid || mvs_ids[i].rev > revid)) i++; ctlr->channels = mvs_ids[i].ports; ctlr->quirks = mvs_ids[i].quirks; ctlr->ccc = 0; 
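/* Interrupt coalescing is driven by hints: hint.mvs.<unit>.ccc gives the coalescing period in microseconds (capped at 100000 below) and hint.mvs.<unit>.cccc the command count threshold; a zero in either one disables coalescing entirely. */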
resource_int_value(device_get_name(dev), device_get_unit(dev), "ccc", &ctlr->ccc); ctlr->cccc = 8; resource_int_value(device_get_name(dev), device_get_unit(dev), "cccc", &ctlr->cccc); if (ctlr->ccc == 0 || ctlr->cccc == 0) { ctlr->ccc = 0; ctlr->cccc = 0; } if (ctlr->ccc > 100000) ctlr->ccc = 100000; device_printf(dev, "Gen-%s, %d %sGbps ports, Port Multiplier %s%s\n", ((ctlr->quirks & MVS_Q_GENI) ? "I" : ((ctlr->quirks & MVS_Q_GENII) ? "II" : "IIe")), ctlr->channels, ((ctlr->quirks & MVS_Q_GENI) ? "1.5" : "3"), ((ctlr->quirks & MVS_Q_GENI) ? "not supported" : "supported"), ((ctlr->quirks & MVS_Q_GENIIE) ? " with FBS" : "")); mtx_init(&ctlr->mtx, "MVS controller lock", NULL, MTX_DEF); /* We should have a memory BAR(0). */ ctlr->r_rid = 0; if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE))) return ENXIO; if (ATA_INL(ctlr->r_mem, PORT_BASE(0) + SATA_PHYCFG_OFS) != 0) ctlr->quirks |= MVS_Q_SOC65; /* Setup our own memory management for channels. */ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return (error); } mvs_ctlr_setup(dev); /* Setup interrupts. */ if (mvs_setup_interrupt(dev)) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return ENXIO; } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "mvsch", -1); if (child == NULL) device_printf(dev, "failed to add channel device\n"); else device_set_ivars(child, (void *)(intptr_t)unit); } bus_generic_attach(dev); return 0; } static int mvs_detach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); /* Detach & delete all children */ device_delete_children(dev); /* Free interrupt. */ if (ctlr->irq.r_irq) { bus_teardown_intr(dev, ctlr->irq.r_irq, ctlr->irq.handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); } /* Free memory. */ rman_fini(&ctlr->sc_iomem); if (ctlr->r_mem) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); mtx_destroy(&ctlr->mtx); return (0); } static int mvs_ctlr_setup(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); int ccc = ctlr->ccc, cccc = ctlr->cccc, ccim = 0; /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIM, 0x00000000); /* Clear HC interrupts */ ATA_OUTL(ctlr->r_mem, HC_IC, 0x00000000); /* Clear chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIC, 0); /* Configure per-HC CCC */ if (ccc && bootverbose) { device_printf(dev, "CCC with %dus/%dcmd enabled\n", ctlr->ccc, ctlr->cccc); } ccc *= 150; ATA_OUTL(ctlr->r_mem, HC_ICT, cccc); ATA_OUTL(ctlr->r_mem, HC_ITT, ccc); if (ccc) ccim |= IC_HC0_COAL_DONE; /* Enable chip interrupts */ ctlr->gmim = ((ccc ? 
IC_HC0_COAL_DONE : (IC_DONE_HC0 & CHIP_SOC_HC0_MASK(ctlr->channels))) | (IC_ERR_HC0 & CHIP_SOC_HC0_MASK(ctlr->channels))); ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIM, ctlr->gmim | ctlr->pmim); return (0); } static void mvs_edma(device_t dev, device_t child, int mode) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; int bit = IC_DONE_IRQ << (unit * 2); if (ctlr->ccc == 0) return; /* CCC is not working for non-EDMA mode. Unmask device interrupts. */ mtx_lock(&ctlr->mtx); if (mode == MVS_EDMA_OFF) ctlr->pmim |= bit; else ctlr->pmim &= ~bit; ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIM, ctlr->gmim | ctlr->pmim); mtx_unlock(&ctlr->mtx); } static int mvs_suspend(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); bus_generic_suspend(dev); /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIM, 0x00000000); return 0; } static int mvs_resume(device_t dev) { mvs_ctlr_setup(dev); return (bus_generic_resume(dev)); } static int mvs_setup_interrupt(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); /* Allocate all IRQs. */ ctlr->irq.r_irq_rid = 0; if (!(ctlr->irq.r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irq.r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return (ENXIO); } if ((bus_setup_intr(dev, ctlr->irq.r_irq, ATA_INTR_FLAGS, NULL, mvs_intr, ctlr, &ctlr->irq.handle))) { device_printf(dev, "unable to setup interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); ctlr->irq.r_irq = NULL; return (ENXIO); } return (0); } /* * Common case interrupt handler. */ static void mvs_intr(void *data) { struct mvs_controller *ctlr = data; struct mvs_intr_arg arg; void (*function)(void *); int p, chan_num; u_int32_t ic, aic; ic = ATA_INL(ctlr->r_mem, CHIP_SOC_MIC); if ((ic & IC_HC0) == 0) return; /* Acknowledge interrupts of this HC. */ aic = 0; /* Processing interrupts from each initialized channel */ for (chan_num = 0; chan_num < ctlr->channels; chan_num++) { if (ic & (IC_DONE_IRQ << (chan_num * 2))) aic |= HC_IC_DONE(chan_num) | HC_IC_DEV(chan_num); } if (ic & IC_HC0_COAL_DONE) aic |= HC_IC_COAL; ATA_OUTL(ctlr->r_mem, HC_IC, ~aic); /* Call per-port interrupt handler. 
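Each channel owns two adjacent cause bits (done and error), so ic is shifted right by two as the loop advances from port to port.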
*/ for (p = 0; p < ctlr->channels; p++) { arg.cause = ic & (IC_ERR_IRQ|IC_DONE_IRQ); if ((arg.cause != 0) && (function = ctlr->interrupt[p].function)) { arg.arg = ctlr->interrupt[p].argument; function(&arg); } ic >>= 2; } } static struct resource * mvs_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; struct resource *res = NULL; int offset = PORT_BASE(unit & 0x03); rman_res_t st; switch (type) { case SYS_RES_MEMORY: st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + PORT_SIZE - 1, PORT_SIZE, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, PORT_SIZE, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irq.r_irq; break; } return (res); } static int mvs_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { switch (type) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: if (rid != ATA_IRQ_RID) return ENOENT; return (0); } return (EINVAL); } static int mvs_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); if (filter != NULL) { printf("mvs.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } static int mvs_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } static int mvs_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at channel %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } static int -mvs_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) +mvs_child_location(device_t dev, device_t child, struct sbuf *sb) { - snprintf(buf, buflen, "channel=%d", - (int)(intptr_t)device_get_ivars(child)); + sbuf_printf(sb, "channel=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static bus_dma_tag_t mvs_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } static device_method_t mvs_methods[] = { DEVMETHOD(device_probe, mvs_probe), DEVMETHOD(device_attach, mvs_attach), DEVMETHOD(device_detach, mvs_detach), DEVMETHOD(device_suspend, mvs_suspend), DEVMETHOD(device_resume, mvs_resume), DEVMETHOD(bus_print_child, mvs_print_child), DEVMETHOD(bus_alloc_resource, mvs_alloc_resource), DEVMETHOD(bus_release_resource, mvs_release_resource), DEVMETHOD(bus_setup_intr, mvs_setup_intr), DEVMETHOD(bus_teardown_intr,mvs_teardown_intr), - DEVMETHOD(bus_child_location_str, mvs_child_location_str), + DEVMETHOD(bus_child_location, mvs_child_location), DEVMETHOD(bus_get_dma_tag, mvs_get_dma_tag), DEVMETHOD(mvs_edma, mvs_edma), { 0, 0 } }; static driver_t mvs_driver = { "mvs", mvs_methods, 
sizeof(struct mvs_controller) }; DRIVER_MODULE(mvs, simplebus, mvs_driver, mvs_devclass, 0, 0); MODULE_VERSION(mvs, 1); MODULE_DEPEND(mvs, cam, 1, 1, 1); diff --git a/sys/dev/ntb/ntb.c b/sys/dev/ntb/ntb.c index d6b564725aaf..b765da213e2c 100644 --- a/sys/dev/ntb/ntb.c +++ b/sys/dev/ntb/ntb.c @@ -1,552 +1,552 @@ /*- * Copyright (c) 2016-2017 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include +#include #include #include "ntb.h" devclass_t ntb_hw_devclass; SYSCTL_NODE(_hw, OID_AUTO, ntb, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "NTB sysctls"); struct ntb_child { device_t dev; int function; int enabled; int mwoff; int mwcnt; int spadoff; int spadcnt; int dboff; int dbcnt; uint64_t dbmask; void *ctx; const struct ntb_ctx_ops *ctx_ops; struct rmlock ctx_lock; struct ntb_child *next; }; int ntb_register_device(device_t dev) { struct ntb_child **cpp = device_get_softc(dev); struct ntb_child *nc; int i, mw, mwu, mwt, spad, spadu, spadt, db, dbu, dbt; char cfg[128] = ""; char buf[32]; char *n, *np, *c, *p, *name; mwu = 0; mwt = NTB_MW_COUNT(dev); spadu = 0; spadt = NTB_SPAD_COUNT(dev); dbu = 0; dbt = flsll(NTB_DB_VALID_MASK(dev)); device_printf(dev, "%d memory windows, %d scratchpads, " "%d doorbells\n", mwt, spadt, dbt); snprintf(buf, sizeof(buf), "hint.%s.%d.config", device_get_name(dev), device_get_unit(dev)); TUNABLE_STR_FETCH(buf, cfg, sizeof(cfg)); n = cfg; i = 0; while ((c = strsep(&n, ",")) != NULL) { np = c; name = strsep(&np, ":"); if (name != NULL && name[0] == 0) name = NULL; p = strsep(&np, ":"); mw = (p && p[0] != 0) ? strtol(p, NULL, 10) : mwt - mwu; p = strsep(&np, ":"); spad = (p && p[0] != 0) ? strtol(p, NULL, 10) : spadt - spadu; db = (np && np[0] != 0) ? strtol(np, NULL, 10) : dbt - dbu; if (mw > mwt - mwu || spad > spadt - spadu || db > dbt - dbu) { device_printf(dev, "Not enough resources for config\n"); break; } nc = malloc(sizeof(*nc), M_DEVBUF, M_WAITOK | M_ZERO); nc->function = i; nc->mwoff = mwu; nc->mwcnt = mw; nc->spadoff = spadu; nc->spadcnt = spad; nc->dboff = dbu; nc->dbcnt = db; nc->dbmask = (db == 0) ? 
0 : (0xffffffffffffffff >> (64 - db)); rm_init(&nc->ctx_lock, "ntb ctx"); nc->dev = device_add_child(dev, name, -1); if (nc->dev == NULL) { ntb_unregister_device(dev); return (ENOMEM); } device_set_ivars(nc->dev, nc); *cpp = nc; cpp = &nc->next; if (bootverbose) { device_printf(dev, "%d \"%s\":", i, name); if (mw > 0) { printf(" memory windows %d", mwu); if (mw > 1) printf("-%d", mwu + mw - 1); } if (spad > 0) { printf(" scratchpads %d", spadu); if (spad > 1) printf("-%d", spadu + spad - 1); } if (db > 0) { printf(" doorbells %d", dbu); if (db > 1) printf("-%d", dbu + db - 1); } printf("\n"); } mwu += mw; spadu += spad; dbu += db; i++; } bus_generic_attach(dev); return (0); } int ntb_unregister_device(device_t dev) { struct ntb_child **cpp = device_get_softc(dev); struct ntb_child *nc; int error = 0; while ((nc = *cpp) != NULL) { *cpp = (*cpp)->next; error = device_delete_child(dev, nc->dev); if (error) break; rm_destroy(&nc->ctx_lock); free(nc, M_DEVBUF); } return (error); } int -ntb_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) +ntb_child_location(device_t dev, device_t child, struct sbuf *sb) { struct ntb_child *nc = device_get_ivars(child); - snprintf(buf, buflen, "function=%d", nc->function); + sbuf_printf(sb, "function=%d", nc->function); return (0); } int ntb_print_child(device_t dev, device_t child) { struct ntb_child *nc = device_get_ivars(child); int retval; retval = bus_print_child_header(dev, child); if (nc->mwcnt > 0) { printf(" mw %d", nc->mwoff); if (nc->mwcnt > 1) printf("-%d", nc->mwoff + nc->mwcnt - 1); } if (nc->spadcnt > 0) { printf(" spad %d", nc->spadoff); if (nc->spadcnt > 1) printf("-%d", nc->spadoff + nc->spadcnt - 1); } if (nc->dbcnt > 0) { printf(" db %d", nc->dboff); if (nc->dbcnt > 1) printf("-%d", nc->dboff + nc->dbcnt - 1); } retval += printf(" at function %d", nc->function); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } bus_dma_tag_t ntb_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } void ntb_link_event(device_t dev) { struct ntb_child **cpp = device_get_softc(dev); struct ntb_child *nc; struct rm_priotracker ctx_tracker; enum ntb_speed speed; enum ntb_width width; if (NTB_LINK_IS_UP(dev, &speed, &width)) { device_printf(dev, "Link is up (PCIe %d.x / x%d)\n", (int)speed, (int)width); } else { device_printf(dev, "Link is down\n"); } for (nc = *cpp; nc != NULL; nc = nc->next) { rm_rlock(&nc->ctx_lock, &ctx_tracker); if (nc->ctx_ops != NULL && nc->ctx_ops->link_event != NULL) nc->ctx_ops->link_event(nc->ctx); rm_runlock(&nc->ctx_lock, &ctx_tracker); } } void ntb_db_event(device_t dev, uint32_t vec) { struct ntb_child **cpp = device_get_softc(dev); struct ntb_child *nc; struct rm_priotracker ctx_tracker; for (nc = *cpp; nc != NULL; nc = nc->next) { rm_rlock(&nc->ctx_lock, &ctx_tracker); if (nc->ctx_ops != NULL && nc->ctx_ops->db_event != NULL) nc->ctx_ops->db_event(nc->ctx, vec); rm_runlock(&nc->ctx_lock, &ctx_tracker); } } int ntb_port_number(device_t ntb) { return (NTB_PORT_NUMBER(device_get_parent(ntb))); } int ntb_peer_port_count(device_t ntb) { return (NTB_PEER_PORT_COUNT(device_get_parent(ntb))); } int ntb_peer_port_number(device_t ntb, int pidx) { return (NTB_PEER_PORT_NUMBER(device_get_parent(ntb), pidx)); } int ntb_peer_port_idx(device_t ntb, int port) { return (NTB_PEER_PORT_IDX(device_get_parent(ntb), port)); } bool ntb_link_is_up(device_t ntb, enum ntb_speed *speed, enum ntb_width *width) { return 
(NTB_LINK_IS_UP(device_get_parent(ntb), speed, width)); } int ntb_link_enable(device_t ntb, enum ntb_speed speed, enum ntb_width width) { struct ntb_child *nc = device_get_ivars(ntb); struct ntb_child **cpp = device_get_softc(device_get_parent(nc->dev)); struct ntb_child *nc1; for (nc1 = *cpp; nc1 != NULL; nc1 = nc1->next) { if (nc1->enabled) { nc->enabled = 1; return (0); } } nc->enabled = 1; return (NTB_LINK_ENABLE(device_get_parent(ntb), speed, width)); } int ntb_link_disable(device_t ntb) { struct ntb_child *nc = device_get_ivars(ntb); struct ntb_child **cpp = device_get_softc(device_get_parent(nc->dev)); struct ntb_child *nc1; if (!nc->enabled) return (0); nc->enabled = 0; for (nc1 = *cpp; nc1 != NULL; nc1 = nc1->next) { if (nc1->enabled) return (0); } return (NTB_LINK_DISABLE(device_get_parent(ntb))); } bool ntb_link_enabled(device_t ntb) { struct ntb_child *nc = device_get_ivars(ntb); return (nc->enabled && NTB_LINK_ENABLED(device_get_parent(ntb))); } int ntb_set_ctx(device_t ntb, void *ctx, const struct ntb_ctx_ops *ctx_ops) { struct ntb_child *nc = device_get_ivars(ntb); if (ctx == NULL || ctx_ops == NULL) return (EINVAL); rm_wlock(&nc->ctx_lock); if (nc->ctx_ops != NULL) { rm_wunlock(&nc->ctx_lock); return (EINVAL); } nc->ctx = ctx; nc->ctx_ops = ctx_ops; /* * If the application driver asks for link events, generate a fake one now * to let it update link state without races while we hold the lock. */ if (ctx_ops->link_event != NULL) ctx_ops->link_event(ctx); rm_wunlock(&nc->ctx_lock); return (0); } void * ntb_get_ctx(device_t ntb, const struct ntb_ctx_ops **ctx_ops) { struct ntb_child *nc = device_get_ivars(ntb); KASSERT(nc->ctx != NULL && nc->ctx_ops != NULL, ("bogus")); if (ctx_ops != NULL) *ctx_ops = nc->ctx_ops; return (nc->ctx); } void ntb_clear_ctx(device_t ntb) { struct ntb_child *nc = device_get_ivars(ntb); rm_wlock(&nc->ctx_lock); nc->ctx = NULL; nc->ctx_ops = NULL; rm_wunlock(&nc->ctx_lock); } uint8_t ntb_mw_count(device_t ntb) { struct ntb_child *nc = device_get_ivars(ntb); return (nc->mwcnt); } int ntb_mw_get_range(device_t ntb, unsigned mw_idx, vm_paddr_t *base, caddr_t *vbase, size_t *size, size_t *align, size_t *align_size, bus_addr_t *plimit) { struct ntb_child *nc = device_get_ivars(ntb); return (NTB_MW_GET_RANGE(device_get_parent(ntb), mw_idx + nc->mwoff, base, vbase, size, align, align_size, plimit)); } int ntb_mw_set_trans(device_t ntb, unsigned mw_idx, bus_addr_t addr, size_t size) { struct ntb_child *nc = device_get_ivars(ntb); return (NTB_MW_SET_TRANS(device_get_parent(ntb), mw_idx + nc->mwoff, addr, size)); } int ntb_mw_clear_trans(device_t ntb, unsigned mw_idx) { struct ntb_child *nc = device_get_ivars(ntb); return (NTB_MW_CLEAR_TRANS(device_get_parent(ntb), mw_idx + nc->mwoff)); } int ntb_mw_get_wc(device_t ntb, unsigned mw_idx, vm_memattr_t *mode) { struct ntb_child *nc = device_get_ivars(ntb); return (NTB_MW_GET_WC(device_get_parent(ntb), mw_idx + nc->mwoff, mode)); } int ntb_mw_set_wc(device_t ntb, unsigned mw_idx, vm_memattr_t mode) { struct ntb_child *nc = device_get_ivars(ntb); return (NTB_MW_SET_WC(device_get_parent(ntb), mw_idx + nc->mwoff, mode)); } uint8_t ntb_spad_count(device_t ntb) { struct ntb_child *nc = device_get_ivars(ntb); return (nc->spadcnt); } void ntb_spad_clear(device_t ntb) { struct ntb_child *nc = device_get_ivars(ntb); unsigned i; for (i = 0; i < nc->spadcnt; i++) NTB_SPAD_WRITE(device_get_parent(ntb), i + nc->spadoff, 0); } int ntb_spad_write(device_t ntb, unsigned int idx, uint32_t val) { struct ntb_child *nc =
device_get_ivars(ntb); return (NTB_SPAD_WRITE(device_get_parent(ntb), idx + nc->spadoff, val)); } int ntb_spad_read(device_t ntb, unsigned int idx, uint32_t *val) { struct ntb_child *nc = device_get_ivars(ntb); return (NTB_SPAD_READ(device_get_parent(ntb), idx + nc->spadoff, val)); } int ntb_peer_spad_write(device_t ntb, unsigned int idx, uint32_t val) { struct ntb_child *nc = device_get_ivars(ntb); return (NTB_PEER_SPAD_WRITE(device_get_parent(ntb), idx + nc->spadoff, val)); } int ntb_peer_spad_read(device_t ntb, unsigned int idx, uint32_t *val) { struct ntb_child *nc = device_get_ivars(ntb); return (NTB_PEER_SPAD_READ(device_get_parent(ntb), idx + nc->spadoff, val)); } uint64_t ntb_db_valid_mask(device_t ntb) { struct ntb_child *nc = device_get_ivars(ntb); return (nc->dbmask); } int ntb_db_vector_count(device_t ntb) { return (NTB_DB_VECTOR_COUNT(device_get_parent(ntb))); } uint64_t ntb_db_vector_mask(device_t ntb, uint32_t vector) { struct ntb_child *nc = device_get_ivars(ntb); return ((NTB_DB_VECTOR_MASK(device_get_parent(ntb), vector) >> nc->dboff) & nc->dbmask); } int ntb_peer_db_addr(device_t ntb, bus_addr_t *db_addr, vm_size_t *db_size) { return (NTB_PEER_DB_ADDR(device_get_parent(ntb), db_addr, db_size)); } void ntb_db_clear(device_t ntb, uint64_t bits) { struct ntb_child *nc = device_get_ivars(ntb); return (NTB_DB_CLEAR(device_get_parent(ntb), bits << nc->dboff)); } void ntb_db_clear_mask(device_t ntb, uint64_t bits) { struct ntb_child *nc = device_get_ivars(ntb); return (NTB_DB_CLEAR_MASK(device_get_parent(ntb), bits << nc->dboff)); } uint64_t ntb_db_read(device_t ntb) { struct ntb_child *nc = device_get_ivars(ntb); return ((NTB_DB_READ(device_get_parent(ntb)) >> nc->dboff) & nc->dbmask); } void ntb_db_set_mask(device_t ntb, uint64_t bits) { struct ntb_child *nc = device_get_ivars(ntb); return (NTB_DB_SET_MASK(device_get_parent(ntb), bits << nc->dboff)); } void ntb_peer_db_set(device_t ntb, uint64_t bits) { struct ntb_child *nc = device_get_ivars(ntb); return (NTB_PEER_DB_SET(device_get_parent(ntb), bits << nc->dboff)); } MODULE_VERSION(ntb, 1); diff --git a/sys/dev/ntb/ntb.h b/sys/dev/ntb/ntb.h index 3c31d48276b6..8883d52ce1ce 100644 --- a/sys/dev/ntb/ntb.h +++ b/sys/dev/ntb/ntb.h @@ -1,458 +1,457 @@ /*- * Copyright (c) 2016 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _NTB_H_ #define _NTB_H_ #include "ntb_if.h" extern devclass_t ntb_hw_devclass; SYSCTL_DECL(_hw_ntb); int ntb_register_device(device_t ntb); int ntb_unregister_device(device_t ntb); -int ntb_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen); +int ntb_child_location(device_t dev, device_t child, struct sbuf *sb); int ntb_print_child(device_t dev, device_t child); bus_dma_tag_t ntb_get_dma_tag(device_t bus, device_t child); /* * ntb_link_event() - notify driver context of a change in link status * @ntb: NTB device context * * Notify the driver context that the link status may have changed. The driver * should call ntb_link_is_up() to get the current status. */ void ntb_link_event(device_t ntb); /* * ntb_db_event() - notify driver context of a doorbell event * @ntb: NTB device context * @vector: Interrupt vector number * * Notify the driver context of a doorbell event. If hardware supports * multiple interrupt vectors for doorbells, the vector number indicates which * vector received the interrupt. The vector number is relative to the first * vector used for doorbells, starting at zero, and must be less than * ntb_db_vector_count(). The driver may call ntb_db_read() to check which * doorbell bits need service, and ntb_db_vector_mask() to determine which of * those bits are associated with the vector number. */ void ntb_db_event(device_t ntb, uint32_t vec); /** * ntb_port_number() - get the local port number * @ntb: NTB device context. * * The hardware driver returns the local port number in compliance with the * topology. * * Return: the local port number */ int ntb_port_number(device_t ntb); /** * ntb_peer_port_count() - get the number of peer device ports * @ntb: NTB device context. * * By default the hardware driver supports just one peer device. * * Return: the number of peer ports */ int ntb_peer_port_count(device_t ntb); /** * ntb_peer_port_number() - get the peer port by given index * @ntb: NTB device context. * @pidx: Peer port index (should be zero for now). * * By default the hardware driver supports just one peer device, so this method * shall return the corresponding value. * * Return: the peer device port or an error number */ int ntb_peer_port_number(device_t ntb, int pidx); /* * ntb_peer_port_idx() - get the peer device port index by given port * number * @ntb: NTB device context. * @port: Peer port number * * By default the hardware driver supports just one peer device, so given a * valid peer port number, the return value shall be zero.
* * Return: the peer port index or an error number */ int ntb_peer_port_idx(device_t ntb, int port); /* * ntb_link_is_up() - get the current ntb link state * @ntb: NTB device context * @speed: OUT - The link speed expressed as PCIe generation number * @width: OUT - The link width expressed as the number of PCIe lanes * * RETURNS: true or false based on the hardware link state */ bool ntb_link_is_up(device_t ntb, enum ntb_speed *speed, enum ntb_width *width); /* * ntb_link_enable() - enable the link on the secondary side of the ntb * @ntb: NTB device context * @max_speed: The maximum link speed expressed as PCIe generation number[0] * @max_width: The maximum link width expressed as the number of PCIe lanes[0] * * Enable the link on the secondary side of the ntb. This can only be done * from the primary side of the ntb in primary or b2b topology. The ntb device * should train the link to its maximum speed and width, or the requested speed * and width, whichever is smaller, if supported. * * Return: Zero on success, otherwise an error number. * * [0]: Only NTB_SPEED_AUTO and NTB_WIDTH_AUTO are valid inputs; other speed * and width input will be ignored. */ int ntb_link_enable(device_t ntb, enum ntb_speed speed, enum ntb_width width); /* * ntb_link_disable() - disable the link on the secondary side of the ntb * @ntb: NTB device context * * Disable the link on the secondary side of the ntb. This can only be done * from the primary side of the ntb in primary or b2b topology. The ntb device * should disable the link. Returning from this call must indicate that a * barrier has passed, after which no more writes may pass in either direction * across the link, except if this call returns an error number. * * Return: Zero on success, otherwise an error number. */ int ntb_link_disable(device_t ntb); /* * get enable status of the link on the secondary side of the ntb */ bool ntb_link_enabled(device_t ntb); /* * ntb_set_ctx() - associate a driver context with an ntb device * @ntb: NTB device context * @ctx: Driver context * @ctx_ops: Driver context operations * * Associate a driver context and operations with an ntb device. The context is * provided by the client driver, and the driver may associate a different * context with each ntb device. * * Return: Zero if the context is associated, otherwise an error number. */ int ntb_set_ctx(device_t ntb, void *ctx, const struct ntb_ctx_ops *ctx_ops); /* * ntb_get_ctx() - get a driver context associated with an ntb device * @ntb: NTB device context * @ctx_ops: Driver context operations * * Get a driver context and operations associated with an ntb device. */ void * ntb_get_ctx(device_t ntb, const struct ntb_ctx_ops **ctx_ops); /* * ntb_clear_ctx() - disassociate any driver context from an ntb device * @ntb: NTB device context * * Clear any association that may exist between a driver context and the ntb * device. */ void ntb_clear_ctx(device_t ntb); /* * ntb_mw_count() - Get the number of memory windows available for KPI * consumers. * * (Excludes any MW wholly reserved for register access.) */ uint8_t ntb_mw_count(device_t ntb); /* * ntb_mw_get_range() - get the range of a memory window * @ntb: NTB device context * @idx: Memory window number * @base: OUT - the base address for mapping the memory window * @size: OUT - the size for mapping the memory window * @align: OUT - the base alignment for translating the memory window * @align_size: OUT - the size alignment for translating the memory window * * Get the range of a memory window.
NULL may be given for any output * parameter if the value is not needed. The base and size may be used for * mapping the memory window, to access the peer memory. The alignment and * size may be used for translating the memory window, for the peer to access * memory on the local system. * * Return: Zero on success, otherwise an error number. */ int ntb_mw_get_range(device_t ntb, unsigned mw_idx, vm_paddr_t *base, caddr_t *vbase, size_t *size, size_t *align, size_t *align_size, bus_addr_t *plimit); /* * ntb_mw_set_trans() - set the translation of a memory window * @ntb: NTB device context * @idx: Memory window number * @addr: The dma address of local memory to expose to the peer * @size: The size of the local memory to expose to the peer * * Set the translation of a memory window. The peer may access local memory * through the window starting at the address, up to the size. The address * must be aligned to the alignment specified by ntb_mw_get_range(). The size * must be aligned to the size alignment specified by ntb_mw_get_range(). The * address must be below the plimit specified by ntb_mw_get_range() (i.e. for * 32-bit BARs). * * Return: Zero on success, otherwise an error number. */ int ntb_mw_set_trans(device_t ntb, unsigned mw_idx, bus_addr_t addr, size_t size); /* * ntb_mw_clear_trans() - clear the translation of a memory window * @ntb: NTB device context * @idx: Memory window number * * Clear the translation of a memory window. The peer may no longer access * local memory through the window. * * Return: Zero on success, otherwise an error number. */ int ntb_mw_clear_trans(device_t ntb, unsigned mw_idx); /* * ntb_mw_get_wc - Get the write-combine status of a memory window * * Returns: Zero on success, setting *mode; otherwise an error number (e.g. if * idx is an invalid memory window). * * Mode is a VM_MEMATTR_* type. */ int ntb_mw_get_wc(device_t ntb, unsigned mw_idx, vm_memattr_t *mode); /* * ntb_mw_set_wc - Set the write-combine status of a memory window * * If 'mode' matches the current status, this does nothing and succeeds. Mode * is a VM_MEMATTR_* type. * * Returns: Zero on success, setting the caching attribute on the virtual * mapping of the BAR; otherwise an error number (e.g. if idx is an invalid * memory window, or if changing the caching attribute fails). */ int ntb_mw_set_wc(device_t ntb, unsigned mw_idx, vm_memattr_t mode); /* * ntb_spad_count() - get the total scratch regs usable * @ntb: pointer to ntb_softc instance * * This function returns the number of 32-bit scratchpad registers usable by the * upper layer. * * RETURNS: total number of scratch pad registers available */ uint8_t ntb_spad_count(device_t ntb); /* * ntb_spad_clear() - zero local scratch registers * @ntb: pointer to ntb_softc instance * * This function overwrites all local scratchpad registers with zeroes. */ void ntb_spad_clear(device_t ntb); /* * ntb_spad_write() - write to the local scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to the scratchpad register, 0 based * @val: the data value to put into the register * * This function allows writing of a 32bit value to the indexed scratchpad * register. The register resides on the local (internal) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success.
*/ int ntb_spad_write(device_t ntb, unsigned int idx, uint32_t val); /* * ntb_spad_read() - read from the local scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to scratchpad register, 0 based * @val: pointer to 32bit integer for storing the register value * * This function allows reading of the 32bit scratchpad register on * the local (internal) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_spad_read(device_t ntb, unsigned int idx, uint32_t *val); /* * ntb_peer_spad_write() - write to the secondary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to the scratchpad register, 0 based * @val: the data value to put into the register * * This function allows writing of a 32bit value to the indexed scratchpad * register. The register resides on the secondary (external) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_peer_spad_write(device_t ntb, unsigned int idx, uint32_t val); /* * ntb_peer_spad_read() - read from the secondary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to scratchpad register, 0 based * @val: pointer to 32bit integer for storing the register value * * This function allows reading of the 32bit scratchpad register on * the secondary (external) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_peer_spad_read(device_t ntb, unsigned int idx, uint32_t *val); /* * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb * @ntb: NTB device context * * Hardware may support a different number or arrangement of doorbell bits. * * Return: A mask of doorbell bits supported by the ntb. */ uint64_t ntb_db_valid_mask(device_t ntb); /* * ntb_db_vector_count() - get the number of doorbell interrupt vectors * @ntb: NTB device context. * * Hardware may support a different number of interrupt vectors. * * Return: The number of doorbell interrupt vectors. */ int ntb_db_vector_count(device_t ntb); /* * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector * @ntb: NTB device context * @vector: Doorbell vector number * * Each interrupt vector may have a different number or arrangement of bits. * * Return: A mask of doorbell bits serviced by a vector. */ uint64_t ntb_db_vector_mask(device_t ntb, uint32_t vector); /* * ntb_peer_db_addr() - address and size of the peer doorbell register * @ntb: NTB device context. * @db_addr: OUT - The address of the peer doorbell register. * @db_size: OUT - The number of bytes to write the peer doorbell register. * * Return the address of the peer doorbell register. This may be used, for * example, by drivers that offload memory copy operations to a dma engine. * The drivers may wish to ring the peer doorbell at the completion of memory * copy operations. For efficiency, and to simplify ordering of operations * between the dma memory copies and the ringing doorbell, the driver may * append one additional dma memory copy with the doorbell register as the * destination, after the memory copy operations. * * Return: Zero on success, otherwise an error number. * * Note that writing the peer doorbell via a memory window will *not* generate * an interrupt on the remote host; that must be done separately. */ int ntb_peer_db_addr(device_t ntb, bus_addr_t *db_addr, vm_size_t *db_size); /* * ntb_db_clear() - clear bits in the local doorbell register * @ntb: NTB device context. * @db_bits: Doorbell bits to clear.
* * Clear bits in the local doorbell register, arming the bits for the next * doorbell. * * Return: Zero on success, otherwise an error number. */ void ntb_db_clear(device_t ntb, uint64_t bits); /* * ntb_db_clear_mask() - clear bits in the local doorbell mask * @ntb: NTB device context. * @db_bits: Doorbell bits to clear. * * Clear bits in the local doorbell mask register, allowing doorbell interrupts * to be generated for those doorbell bits. If a doorbell bit is already * set at the time the mask is cleared, and the corresponding mask bit is * changed from set to clear, then the ntb driver must ensure that * ntb_db_event() is called. If the hardware does not generate the interrupt * on clearing the mask bit, then the driver must call ntb_db_event() anyway. * * Return: Zero on success, otherwise an error number. */ void ntb_db_clear_mask(device_t ntb, uint64_t bits); /* * ntb_db_read() - read the local doorbell register * @ntb: NTB device context. * * Read the local doorbell register, and return the bits that are set. * * Return: The bits currently set in the local doorbell register. */ uint64_t ntb_db_read(device_t ntb); /* * ntb_db_set_mask() - set bits in the local doorbell mask * @ntb: NTB device context. * @db_bits: Doorbell mask bits to set. * * Set bits in the local doorbell mask register, preventing doorbell interrupts * from being generated for those doorbell bits. Bits that were already set * must remain set. * * Return: Zero on success, otherwise an error number. */ void ntb_db_set_mask(device_t ntb, uint64_t bits); /* * ntb_peer_db_set() - Set the doorbell on the secondary/external side * @ntb: pointer to ntb_softc instance * @bits: doorbell bits to ring * * This function allows triggering of a doorbell on the secondary/external * side that will initiate an interrupt on the remote host. */ void ntb_peer_db_set(device_t ntb, uint64_t bits); #endif /* _NTB_H_ */ diff --git a/sys/dev/ntb/ntb_hw/ntb_hw_amd.c b/sys/dev/ntb/ntb_hw/ntb_hw_amd.c index 450a3a2fa2b8..2d13dc1321cb 100644 --- a/sys/dev/ntb/ntb_hw/ntb_hw_amd.c +++ b/sys/dev/ntb/ntb_hw/ntb_hw_amd.c @@ -1,1316 +1,1316 @@ /*- * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright (C) 2019 Advanced Micro Devices, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * BSD LICENSE * * Copyright (c) 2019 Advanced Micro Devices, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of AMD corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Contact Information : * Rajesh Kumar */ /* * The Non-Transparent Bridge (NTB) is a device that allows you to connect * two or more systems using PCI-e links, providing remote memory access. * * This module contains a driver for NTB hardware in AMD CPUs. * * Much of the code in this module is shared with Linux. Any patches may * be picked up and redistributed in Linux with a dual GPL/BSD license. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ntb_hw_amd.h" #include "dev/ntb/ntb.h" MALLOC_DEFINE(M_AMD_NTB, "amd_ntb_hw", "amd_ntb_hw driver memory allocations"); static const struct amd_ntb_hw_info amd_ntb_hw_info_list[] = { { .vendor_id = NTB_HW_AMD_VENDOR_ID, .device_id = NTB_HW_AMD_DEVICE_ID1, .mw_count = 3, .bar_start_idx = 1, .spad_count = 16, .db_count = 16, .msix_vector_count = 24, .quirks = QUIRK_MW0_32BIT, .desc = "AMD Non-Transparent Bridge"}, { .vendor_id = NTB_HW_AMD_VENDOR_ID, .device_id = NTB_HW_AMD_DEVICE_ID2, .mw_count = 2, .bar_start_idx = 2, .spad_count = 16, .db_count = 16, .msix_vector_count = 24, .quirks = 0, .desc = "AMD Non-Transparent Bridge"}, { .vendor_id = NTB_HW_HYGON_VENDOR_ID, .device_id = NTB_HW_HYGON_DEVICE_ID1, .mw_count = 3, .bar_start_idx = 1, .spad_count = 16, .db_count = 16, .msix_vector_count = 24, .quirks = QUIRK_MW0_32BIT, .desc = "Hygon Non-Transparent Bridge"}, }; static const struct pci_device_table amd_ntb_devs[] = { { PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID1), .driver_data = (uintptr_t)&amd_ntb_hw_info_list[0], PCI_DESCR("AMD Non-Transparent Bridge") }, { PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID2), .driver_data = (uintptr_t)&amd_ntb_hw_info_list[1], PCI_DESCR("AMD Non-Transparent Bridge") }, { PCI_DEV(NTB_HW_HYGON_VENDOR_ID, NTB_HW_HYGON_DEVICE_ID1), .driver_data = (uintptr_t)&amd_ntb_hw_info_list[0], PCI_DESCR("Hygon Non-Transparent Bridge") } }; static unsigned g_amd_ntb_hw_debug_level; SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_amd_ntb_hw_debug_level, 0, "amd_ntb_hw log level -- higher is verbose"); #define amd_ntb_printf(lvl, ...)
do { \ if (lvl <= g_amd_ntb_hw_debug_level) \ device_printf(ntb->device, __VA_ARGS__); \ } while (0) #ifdef __i386__ static __inline uint64_t bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t offset) { return (bus_space_read_4(tag, handle, offset) | ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32); } static __inline void bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t offset, uint64_t val) { bus_space_write_4(tag, handle, offset, val); bus_space_write_4(tag, handle, offset + 4, val >> 32); } #endif /* * AMD NTB INTERFACE ROUTINES */ static int amd_ntb_port_number(device_t dev) { struct amd_ntb_softc *ntb = device_get_softc(dev); amd_ntb_printf(1, "%s: conn_type %d\n", __func__, ntb->conn_type); switch (ntb->conn_type) { case NTB_CONN_PRI: return (NTB_PORT_PRI_USD); case NTB_CONN_SEC: return (NTB_PORT_SEC_DSD); default: break; } return (-EINVAL); } static int amd_ntb_peer_port_count(device_t dev) { struct amd_ntb_softc *ntb = device_get_softc(dev); amd_ntb_printf(1, "%s: peer cnt %d\n", __func__, NTB_DEF_PEER_CNT); return (NTB_DEF_PEER_CNT); } static int amd_ntb_peer_port_number(device_t dev, int pidx) { struct amd_ntb_softc *ntb = device_get_softc(dev); amd_ntb_printf(1, "%s: pidx %d conn type %d\n", __func__, pidx, ntb->conn_type); if (pidx != NTB_DEF_PEER_IDX) return (-EINVAL); switch (ntb->conn_type) { case NTB_CONN_PRI: return (NTB_PORT_SEC_DSD); case NTB_CONN_SEC: return (NTB_PORT_PRI_USD); default: break; } return (-EINVAL); } static int amd_ntb_peer_port_idx(device_t dev, int port) { struct amd_ntb_softc *ntb = device_get_softc(dev); int peer_port; peer_port = amd_ntb_peer_port_number(dev, NTB_DEF_PEER_IDX); amd_ntb_printf(1, "%s: port %d peer_port %d\n", __func__, port, peer_port); if (peer_port == -EINVAL || port != peer_port) return (-EINVAL); return (0); } /* * AMD NTB INTERFACE - LINK ROUTINES */ static inline int amd_link_is_up(struct amd_ntb_softc *ntb) { amd_ntb_printf(2, "%s: peer_sta 0x%x cntl_sta 0x%x\n", __func__, ntb->peer_sta, ntb->cntl_sta); if (!ntb->peer_sta) return (NTB_LNK_STA_ACTIVE(ntb->cntl_sta)); return (0); } static inline enum ntb_speed amd_ntb_link_sta_speed(struct amd_ntb_softc *ntb) { if (!amd_link_is_up(ntb)) return (NTB_SPEED_NONE); return (NTB_LNK_STA_SPEED(ntb->lnk_sta)); } static inline enum ntb_width amd_ntb_link_sta_width(struct amd_ntb_softc *ntb) { if (!amd_link_is_up(ntb)) return (NTB_WIDTH_NONE); return (NTB_LNK_STA_WIDTH(ntb->lnk_sta)); } static bool amd_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width) { struct amd_ntb_softc *ntb = device_get_softc(dev); if (speed != NULL) *speed = amd_ntb_link_sta_speed(ntb); if (width != NULL) *width = amd_ntb_link_sta_width(ntb); return (amd_link_is_up(ntb)); } static int amd_ntb_link_enable(device_t dev, enum ntb_speed max_speed, enum ntb_width max_width) { struct amd_ntb_softc *ntb = device_get_softc(dev); uint32_t ntb_ctl; amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n", __func__, ntb->int_mask, ntb->conn_type); amd_init_side_info(ntb); /* Enable event interrupt */ ntb->int_mask &= ~AMD_EVENT_INTMASK; amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask); if (ntb->conn_type == NTB_CONN_SEC) return (EINVAL); amd_ntb_printf(0, "%s: Enabling Link.\n", __func__); ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET); ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL); amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl); amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl); return (0); } static int amd_ntb_link_disable(device_t 
dev) { struct amd_ntb_softc *ntb = device_get_softc(dev); uint32_t ntb_ctl; amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n", __func__, ntb->int_mask, ntb->conn_type); amd_deinit_side_info(ntb); /* Disable event interrupt */ ntb->int_mask |= AMD_EVENT_INTMASK; amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask); if (ntb->conn_type == NTB_CONN_SEC) return (EINVAL); amd_ntb_printf(0, "%s: Disabling Link.\n", __func__); ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET); ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL); amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl); amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl); return (0); } /* * AMD NTB memory window routines */ static uint8_t amd_ntb_mw_count(device_t dev) { struct amd_ntb_softc *ntb = device_get_softc(dev); return (ntb->hw_info->mw_count); } static int amd_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base, caddr_t *vbase, size_t *size, size_t *align, size_t *align_size, bus_addr_t *plimit) { struct amd_ntb_softc *ntb = device_get_softc(dev); struct amd_ntb_pci_bar_info *bar_info; if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) return (EINVAL); bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx]; if (base != NULL) *base = bar_info->pbase; if (vbase != NULL) *vbase = bar_info->vbase; if (align != NULL) *align = bar_info->size; if (size != NULL) *size = bar_info->size; if (align_size != NULL) *align_size = 1; if (plimit != NULL) { /* * For Device ID 0x145B (which has 3 memory windows), * memory window 0 uses a 32-bit BAR. The remaining * cases all use a 64-bit BAR. */ if ((mw_idx == 0) && (ntb->hw_info->quirks & QUIRK_MW0_32BIT)) *plimit = BUS_SPACE_MAXADDR_32BIT; else *plimit = BUS_SPACE_MAXADDR; } return (0); } static int amd_ntb_mw_set_trans(device_t dev, unsigned mw_idx, bus_addr_t addr, size_t size) { struct amd_ntb_softc *ntb = device_get_softc(dev); struct amd_ntb_pci_bar_info *bar_info; if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) return (EINVAL); bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx]; /* Make sure the range fits in the usable mw size. */ if (size > bar_info->size) { amd_ntb_printf(0, "%s: size 0x%jx greater than mw_size 0x%jx\n", __func__, (uintmax_t)size, (uintmax_t)bar_info->size); return (EINVAL); } amd_ntb_printf(1, "%s: mw %d mw_size 0x%jx size 0x%jx base %p\n", __func__, mw_idx, (uintmax_t)bar_info->size, (uintmax_t)size, (void *)bar_info->pci_bus_handle); /* * The AMD NTB XLAT and Limit registers need to be written only after * link enable. * * Set and verify setting the translation address register. */ amd_ntb_peer_reg_write(8, bar_info->xlat_off, (uint64_t)addr); amd_ntb_printf(0, "%s: mw %d xlat_off 0x%x cur_val 0x%jx addr %p\n", __func__, mw_idx, bar_info->xlat_off, amd_ntb_peer_reg_read(8, bar_info->xlat_off), (void *)addr); /* * Set and verify setting the limit register. * * For Device ID 0x145B (which has 3 memory windows), * memory window 0 uses a 32-bit BAR. The remaining * cases all use a 64-bit BAR.
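The register write width below matches: a 4-byte write for the 32-bit case, an 8-byte write otherwise.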
*/ if ((mw_idx == 0) && (ntb->hw_info->quirks & QUIRK_MW0_32BIT)) { amd_ntb_reg_write(4, bar_info->limit_off, (uint32_t)size); amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%x limit 0x%x\n", __func__, bar_info->limit_off, amd_ntb_peer_reg_read(4, bar_info->limit_off), (uint32_t)size); } else { amd_ntb_reg_write(8, bar_info->limit_off, (uint64_t)size); amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%jx limit 0x%jx\n", __func__, bar_info->limit_off, amd_ntb_peer_reg_read(8, bar_info->limit_off), (uintmax_t)size); } return (0); } static int amd_ntb_mw_clear_trans(device_t dev, unsigned mw_idx) { struct amd_ntb_softc *ntb = device_get_softc(dev); amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx); if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) return (EINVAL); return (amd_ntb_mw_set_trans(dev, mw_idx, 0, 0)); } static int amd_ntb_mw_set_wc(device_t dev, unsigned int mw_idx, vm_memattr_t mode) { struct amd_ntb_softc *ntb = device_get_softc(dev); struct amd_ntb_pci_bar_info *bar_info; int rc; if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) return (EINVAL); bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx]; if (mode == bar_info->map_mode) return (0); rc = pmap_change_attr((vm_offset_t)bar_info->vbase, bar_info->size, mode); if (rc == 0) bar_info->map_mode = mode; return (rc); } static int amd_ntb_mw_get_wc(device_t dev, unsigned mw_idx, vm_memattr_t *mode) { struct amd_ntb_softc *ntb = device_get_softc(dev); struct amd_ntb_pci_bar_info *bar_info; amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx); if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) return (EINVAL); bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx]; *mode = bar_info->map_mode; return (0); } /* * AMD NTB doorbell routines */ static int amd_ntb_db_vector_count(device_t dev) { struct amd_ntb_softc *ntb = device_get_softc(dev); amd_ntb_printf(1, "%s: db_count 0x%x\n", __func__, ntb->hw_info->db_count); return (ntb->hw_info->db_count); } static uint64_t amd_ntb_db_valid_mask(device_t dev) { struct amd_ntb_softc *ntb = device_get_softc(dev); amd_ntb_printf(1, "%s: db_valid_mask 0x%x\n", __func__, ntb->db_valid_mask); return (ntb->db_valid_mask); } static uint64_t amd_ntb_db_vector_mask(device_t dev, uint32_t vector) { struct amd_ntb_softc *ntb = device_get_softc(dev); amd_ntb_printf(1, "%s: vector %d db_count 0x%x db_valid_mask 0x%x\n", __func__, vector, ntb->hw_info->db_count, ntb->db_valid_mask); if (vector < 0 || vector >= ntb->hw_info->db_count) return (0); return (ntb->db_valid_mask & (1 << vector)); } static uint64_t amd_ntb_db_read(device_t dev) { struct amd_ntb_softc *ntb = device_get_softc(dev); uint64_t dbstat_off; dbstat_off = (uint64_t)amd_ntb_reg_read(2, AMD_DBSTAT_OFFSET); amd_ntb_printf(1, "%s: dbstat_off 0x%jx\n", __func__, dbstat_off); return (dbstat_off); } static void amd_ntb_db_clear(device_t dev, uint64_t db_bits) { struct amd_ntb_softc *ntb = device_get_softc(dev); amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits); amd_ntb_reg_write(2, AMD_DBSTAT_OFFSET, (uint16_t)db_bits); } static void amd_ntb_db_set_mask(device_t dev, uint64_t db_bits) { struct amd_ntb_softc *ntb = device_get_softc(dev); DB_MASK_LOCK(ntb); amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n", __func__, ntb->db_mask, db_bits); ntb->db_mask |= db_bits; amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask); DB_MASK_UNLOCK(ntb); } static void amd_ntb_db_clear_mask(device_t dev, uint64_t db_bits) { struct amd_ntb_softc *ntb = device_get_softc(dev); DB_MASK_LOCK(ntb); amd_ntb_printf(1, "%s: 
db_mask 0x%x db_bits 0x%jx\n", __func__, ntb->db_mask, db_bits); ntb->db_mask &= ~db_bits; amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask); DB_MASK_UNLOCK(ntb); } static void amd_ntb_peer_db_set(device_t dev, uint64_t db_bits) { struct amd_ntb_softc *ntb = device_get_softc(dev); amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits); amd_ntb_reg_write(2, AMD_DBREQ_OFFSET, (uint16_t)db_bits); } /* * AMD NTB scratchpad routines */ static uint8_t amd_ntb_spad_count(device_t dev) { struct amd_ntb_softc *ntb = device_get_softc(dev); amd_ntb_printf(1, "%s: spad_count 0x%x\n", __func__, ntb->spad_count); return (ntb->spad_count); } static int amd_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val) { struct amd_ntb_softc *ntb = device_get_softc(dev); uint32_t offset; amd_ntb_printf(2, "%s: idx %d\n", __func__, idx); if (idx < 0 || idx >= ntb->spad_count) return (EINVAL); offset = ntb->self_spad + (idx << 2); *val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset); amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val); return (0); } static int amd_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val) { struct amd_ntb_softc *ntb = device_get_softc(dev); uint32_t offset; amd_ntb_printf(2, "%s: idx %d\n", __func__, idx); if (idx < 0 || idx >= ntb->spad_count) return (EINVAL); offset = ntb->self_spad + (idx << 2); amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val); amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val); return (0); } static void amd_ntb_spad_clear(struct amd_ntb_softc *ntb) { uint8_t i; for (i = 0; i < ntb->spad_count; i++) amd_ntb_spad_write(ntb->device, i, 0); } static int amd_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val) { struct amd_ntb_softc *ntb = device_get_softc(dev); uint32_t offset; amd_ntb_printf(2, "%s: idx %d\n", __func__, idx); if (idx < 0 || idx >= ntb->spad_count) return (EINVAL); offset = ntb->peer_spad + (idx << 2); *val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset); amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val); return (0); } static int amd_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val) { struct amd_ntb_softc *ntb = device_get_softc(dev); uint32_t offset; amd_ntb_printf(2, "%s: idx %d\n", __func__, idx); if (idx < 0 || idx >= ntb->spad_count) return (EINVAL); offset = ntb->peer_spad + (idx << 2); amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val); amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val); return (0); } /* * AMD NTB INIT */ static int amd_ntb_hw_info_handler(SYSCTL_HANDLER_ARGS) { struct amd_ntb_softc* ntb = arg1; struct sbuf *sb; int rc = 0; sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); if (sb == NULL) return (ENOMEM); /* cannot dereference a NULL sbuf */ sbuf_printf(sb, "NTB AMD Hardware info:\n\n"); sbuf_printf(sb, "AMD NTB side: %s\n", (ntb->conn_type == NTB_CONN_PRI)?
"PRIMARY" : "SECONDARY"); sbuf_printf(sb, "AMD LNK STA: 0x%#06x\n", ntb->lnk_sta); if (!amd_link_is_up(ntb)) sbuf_printf(sb, "AMD Link Status: Down\n"); else { sbuf_printf(sb, "AMD Link Status: Up\n"); sbuf_printf(sb, "AMD Link Speed: PCI-E Gen %u\n", NTB_LNK_STA_SPEED(ntb->lnk_sta)); sbuf_printf(sb, "AMD Link Width: PCI-E Width %u\n", NTB_LNK_STA_WIDTH(ntb->lnk_sta)); } sbuf_printf(sb, "AMD Memory window count: %d\n", ntb->hw_info->mw_count); sbuf_printf(sb, "AMD Spad count: %d\n", ntb->spad_count); sbuf_printf(sb, "AMD Doorbell count: %d\n", ntb->hw_info->db_count); sbuf_printf(sb, "AMD MSI-X vec count: %d\n\n", ntb->msix_vec_count); sbuf_printf(sb, "AMD Doorbell valid mask: 0x%x\n", ntb->db_valid_mask); sbuf_printf(sb, "AMD Doorbell Mask: 0x%x\n", amd_ntb_reg_read(4, AMD_DBMASK_OFFSET)); sbuf_printf(sb, "AMD Doorbell: 0x%x\n", amd_ntb_reg_read(4, AMD_DBSTAT_OFFSET)); sbuf_printf(sb, "AMD NTB Incoming XLAT: \n"); sbuf_printf(sb, "AMD XLAT1: 0x%jx\n", amd_ntb_peer_reg_read(8, AMD_BAR1XLAT_OFFSET)); sbuf_printf(sb, "AMD XLAT23: 0x%jx\n", amd_ntb_peer_reg_read(8, AMD_BAR23XLAT_OFFSET)); sbuf_printf(sb, "AMD XLAT45: 0x%jx\n", amd_ntb_peer_reg_read(8, AMD_BAR45XLAT_OFFSET)); sbuf_printf(sb, "AMD LMT1: 0x%x\n", amd_ntb_reg_read(4, AMD_BAR1LMT_OFFSET)); sbuf_printf(sb, "AMD LMT23: 0x%jx\n", amd_ntb_reg_read(8, AMD_BAR23LMT_OFFSET)); sbuf_printf(sb, "AMD LMT45: 0x%jx\n", amd_ntb_reg_read(8, AMD_BAR45LMT_OFFSET)); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } static void amd_ntb_sysctl_init(struct amd_ntb_softc *ntb) { struct sysctl_oid_list *globals; struct sysctl_ctx_list *ctx; ctx = device_get_sysctl_ctx(ntb->device); globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device)); SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "info", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ntb, 0, amd_ntb_hw_info_handler, "A", "AMD NTB HW Information"); } /* * Polls the HW link status register(s); returns true if something has changed. */ static bool amd_ntb_poll_link(struct amd_ntb_softc *ntb) { uint32_t fullreg, reg, stat; fullreg = amd_ntb_peer_reg_read(4, AMD_SIDEINFO_OFFSET); reg = fullreg & NTB_LIN_STA_ACTIVE_BIT; if (reg == ntb->cntl_sta) return (false); amd_ntb_printf(0, "%s: SIDEINFO reg_val = 0x%x cntl_sta 0x%x\n", __func__, fullreg, ntb->cntl_sta); ntb->cntl_sta = reg; stat = pci_read_config(ntb->device, AMD_LINK_STATUS_OFFSET, 4); amd_ntb_printf(0, "%s: LINK_STATUS stat = 0x%x lnk_sta 0x%x.\n", __func__, stat, ntb->lnk_sta); ntb->lnk_sta = stat; return (true); } static void amd_link_hb(void *arg) { struct amd_ntb_softc *ntb = arg; if (amd_ntb_poll_link(ntb)) ntb_link_event(ntb->device); if (!amd_link_is_up(ntb)) { callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT, amd_link_hb, ntb); } else { callout_reset(&ntb->hb_timer, (AMD_LINK_HB_TIMEOUT * 10), amd_link_hb, ntb); } } static void amd_ntb_interrupt(struct amd_ntb_softc *ntb, uint16_t vec) { if (vec < ntb->hw_info->db_count) ntb_db_event(ntb->device, vec); else amd_ntb_printf(0, "Invalid vector %d\n", vec); } static void amd_ntb_vec_isr(void *arg) { struct amd_ntb_vec *nvec = arg; amd_ntb_interrupt(nvec->ntb, nvec->num); } static void amd_ntb_irq_isr(void *arg) { /* If we couldn't set up MSI-X, we only have the one vector. 
*/ amd_ntb_interrupt(arg, 0); } static void amd_init_side_info(struct amd_ntb_softc *ntb) { unsigned int reg; reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); if (!(reg & AMD_SIDE_READY)) { reg |= AMD_SIDE_READY; amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg); } reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); } static void amd_deinit_side_info(struct amd_ntb_softc *ntb) { unsigned int reg; reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); if (reg & AMD_SIDE_READY) { reg &= ~AMD_SIDE_READY; amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg); amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); } } static int amd_ntb_setup_isr(struct amd_ntb_softc *ntb, uint16_t num_vectors, bool msi, bool intx) { uint16_t i; int flags = 0, rc = 0; flags |= RF_ACTIVE; if (intx) flags |= RF_SHAREABLE; for (i = 0; i < num_vectors; i++) { /* RID should be 0 for intx */ if (intx) ntb->int_info[i].rid = i; else ntb->int_info[i].rid = i + 1; ntb->int_info[i].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ, &ntb->int_info[i].rid, flags); if (ntb->int_info[i].res == NULL) { amd_ntb_printf(0, "bus_alloc_resource IRQ failed\n"); return (ENOMEM); } ntb->int_info[i].tag = NULL; ntb->allocated_interrupts++; if (msi || intx) { rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_irq_isr, ntb, &ntb->int_info[i].tag); } else { rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_vec_isr, &ntb->msix_vec[i], &ntb->int_info[i].tag); } if (rc != 0) { amd_ntb_printf(0, "bus_setup_intr %d failed\n", i); return (ENXIO); } } return (0); } static int amd_ntb_create_msix_vec(struct amd_ntb_softc *ntb, uint32_t max_vectors) { uint8_t i; ntb->msix_vec = malloc(max_vectors * sizeof(*ntb->msix_vec), M_AMD_NTB, M_ZERO | M_WAITOK); for (i = 0; i < max_vectors; i++) { ntb->msix_vec[i].num = i; ntb->msix_vec[i].ntb = ntb; } return (0); } static void amd_ntb_free_msix_vec(struct amd_ntb_softc *ntb) { if (ntb->msix_vec_count) { pci_release_msi(ntb->device); ntb->msix_vec_count = 0; } if (ntb->msix_vec != NULL) { free(ntb->msix_vec, M_AMD_NTB); ntb->msix_vec = NULL; } } static int amd_ntb_init_isr(struct amd_ntb_softc *ntb) { uint32_t supported_vectors, num_vectors; bool msi = false, intx = false; int rc = 0; ntb->db_mask = ntb->db_valid_mask; rc = amd_ntb_create_msix_vec(ntb, ntb->hw_info->msix_vector_count); if (rc != 0) { amd_ntb_printf(0, "Error creating msix vectors: %d\n", rc); return (ENOMEM); } /* * Check the number of MSI-X message supported by the device. * Minimum necessary MSI-X message count should be equal to db_count. */ supported_vectors = pci_msix_count(ntb->device); num_vectors = MIN(supported_vectors, ntb->hw_info->db_count); if (num_vectors < ntb->hw_info->db_count) { amd_ntb_printf(0, "No minimum msix: supported %d db %d\n", supported_vectors, ntb->hw_info->db_count); msi = true; goto err_msix_enable; } /* Allocate the necessary number of MSI-x messages */ rc = pci_alloc_msix(ntb->device, &num_vectors); if (rc != 0) { amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc); msi = true; goto err_msix_enable; } if (num_vectors < ntb->hw_info->db_count) { amd_ntb_printf(0, "Allocated only %d MSI-X\n", num_vectors); msi = true; /* * Else set ntb->hw_info->db_count = ntb->msix_vec_count = * num_vectors, msi=false and dont release msi. 
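	 *
	 * To summarize the fallback ladder: try one MSI-X vector per
	 * doorbell; failing that, fall back to a single MSI message;
	 * failing that, fall back to one shared INTx line (in which case
	 * db_count is forced to 1 and msix_vec_count to 0 below).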
*/ } err_msix_enable: if (msi) { free(ntb->msix_vec, M_AMD_NTB); ntb->msix_vec = NULL; pci_release_msi(ntb->device); num_vectors = 1; rc = pci_alloc_msi(ntb->device, &num_vectors); if (rc != 0) { amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc); msi = false; intx = true; } } ntb->hw_info->db_count = ntb->msix_vec_count = num_vectors; if (intx) { num_vectors = 1; ntb->hw_info->db_count = 1; ntb->msix_vec_count = 0; } amd_ntb_printf(0, "%s: db %d msix %d msi %d intx %d\n", __func__, ntb->hw_info->db_count, ntb->msix_vec_count, (int)msi, (int)intx); rc = amd_ntb_setup_isr(ntb, num_vectors, msi, intx); if (rc != 0) { amd_ntb_printf(0, "Error setting up isr: %d\n", rc); amd_ntb_free_msix_vec(ntb); } return (rc); } static void amd_ntb_deinit_isr(struct amd_ntb_softc *ntb) { struct amd_ntb_int_info *current_int; int i; /* Mask all doorbell interrupts */ ntb->db_mask = ntb->db_valid_mask; amd_ntb_reg_write(4, AMD_DBMASK_OFFSET, ntb->db_mask); for (i = 0; i < ntb->allocated_interrupts; i++) { current_int = &ntb->int_info[i]; if (current_int->tag != NULL) bus_teardown_intr(ntb->device, current_int->res, current_int->tag); if (current_int->res != NULL) bus_release_resource(ntb->device, SYS_RES_IRQ, rman_get_rid(current_int->res), current_int->res); } amd_ntb_free_msix_vec(ntb); } static enum amd_ntb_conn_type amd_ntb_get_topo(struct amd_ntb_softc *ntb) { uint32_t info; info = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); if (info & AMD_SIDE_MASK) return (NTB_CONN_SEC); return (NTB_CONN_PRI); } static int amd_ntb_init_dev(struct amd_ntb_softc *ntb) { ntb->db_valid_mask = (1ull << ntb->hw_info->db_count) - 1; mtx_init(&ntb->db_mask_lock, "amd ntb db bits", NULL, MTX_SPIN); switch (ntb->conn_type) { case NTB_CONN_PRI: case NTB_CONN_SEC: ntb->spad_count >>= 1; if (ntb->conn_type == NTB_CONN_PRI) { ntb->self_spad = 0; ntb->peer_spad = 0x20; } else { ntb->self_spad = 0x20; ntb->peer_spad = 0; } callout_init(&ntb->hb_timer, 1); callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT, amd_link_hb, ntb); break; default: amd_ntb_printf(0, "Unsupported AMD NTB topology %d\n", ntb->conn_type); return (EINVAL); } ntb->int_mask = AMD_EVENT_INTMASK; amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask); return (0); } static int amd_ntb_init(struct amd_ntb_softc *ntb) { int rc = 0; ntb->conn_type = amd_ntb_get_topo(ntb); amd_ntb_printf(0, "AMD NTB Side: %s\n", (ntb->conn_type == NTB_CONN_PRI)? 
"PRIMARY" : "SECONDARY"); rc = amd_ntb_init_dev(ntb); if (rc != 0) return (rc); rc = amd_ntb_init_isr(ntb); if (rc != 0) return (rc); return (0); } static void print_map_success(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar, const char *kind) { amd_ntb_printf(0, "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n", PCI_RID2BAR(bar->pci_resource_id), bar->vbase, (char *)bar->vbase + bar->size - 1, (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1), (uintmax_t)bar->size, kind); } static void save_bar_parameters(struct amd_ntb_pci_bar_info *bar) { bar->pci_bus_tag = rman_get_bustag(bar->pci_resource); bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource); bar->pbase = rman_get_start(bar->pci_resource); bar->size = rman_get_size(bar->pci_resource); bar->vbase = rman_get_virtual(bar->pci_resource); bar->map_mode = VM_MEMATTR_UNCACHEABLE; } static int map_bar(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar) { bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, &bar->pci_resource_id, RF_ACTIVE); if (bar->pci_resource == NULL) return (ENXIO); save_bar_parameters(bar); print_map_success(ntb, bar, "mmr"); return (0); } static int amd_ntb_map_pci_bars(struct amd_ntb_softc *ntb) { int rc = 0; /* NTB Config/Control registers - BAR 0 */ ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0); rc = map_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]); if (rc != 0) goto out; /* Memory Window 0 BAR - BAR 1 */ ntb->bar_info[NTB_BAR_1].pci_resource_id = PCIR_BAR(1); rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_1]); if (rc != 0) goto out; ntb->bar_info[NTB_BAR_1].xlat_off = AMD_BAR1XLAT_OFFSET; ntb->bar_info[NTB_BAR_1].limit_off = AMD_BAR1LMT_OFFSET; /* Memory Window 1 BAR - BAR 2&3 */ ntb->bar_info[NTB_BAR_2].pci_resource_id = PCIR_BAR(2); rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_2]); if (rc != 0) goto out; ntb->bar_info[NTB_BAR_2].xlat_off = AMD_BAR23XLAT_OFFSET; ntb->bar_info[NTB_BAR_2].limit_off = AMD_BAR23LMT_OFFSET; /* Memory Window 2 BAR - BAR 4&5 */ ntb->bar_info[NTB_BAR_3].pci_resource_id = PCIR_BAR(4); rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_3]); if (rc != 0) goto out; ntb->bar_info[NTB_BAR_3].xlat_off = AMD_BAR45XLAT_OFFSET; ntb->bar_info[NTB_BAR_3].limit_off = AMD_BAR45LMT_OFFSET; out: if (rc != 0) amd_ntb_printf(0, "unable to allocate pci resource\n"); return (rc); } static void amd_ntb_unmap_pci_bars(struct amd_ntb_softc *ntb) { struct amd_ntb_pci_bar_info *bar_info; int i; for (i = 0; i < NTB_MAX_BARS; i++) { bar_info = &ntb->bar_info[i]; if (bar_info->pci_resource != NULL) bus_release_resource(ntb->device, SYS_RES_MEMORY, bar_info->pci_resource_id, bar_info->pci_resource); } } static int amd_ntb_probe(device_t device) { struct amd_ntb_softc *ntb = device_get_softc(device); const struct pci_device_table *tbl; tbl = PCI_MATCH(device, amd_ntb_devs); if (tbl == NULL) return (ENXIO); ntb->hw_info = (struct amd_ntb_hw_info *)tbl->driver_data; ntb->spad_count = ntb->hw_info->spad_count; device_set_desc(device, tbl->descr); return (BUS_PROBE_GENERIC); } static int amd_ntb_attach(device_t device) { struct amd_ntb_softc *ntb = device_get_softc(device); int error; ntb->device = device; /* Enable PCI bus mastering for "device" */ pci_enable_busmaster(ntb->device); error = amd_ntb_map_pci_bars(ntb); if (error) goto out; error = amd_ntb_init(ntb); if (error) goto out; amd_init_side_info(ntb); amd_ntb_spad_clear(ntb); amd_ntb_sysctl_init(ntb); /* Attach children to this controller */ error = ntb_register_device(device); out: if (error) 
amd_ntb_detach(device); return (error); } static int amd_ntb_detach(device_t device) { struct amd_ntb_softc *ntb = device_get_softc(device); ntb_unregister_device(device); amd_deinit_side_info(ntb); callout_drain(&ntb->hb_timer); amd_ntb_deinit_isr(ntb); mtx_destroy(&ntb->db_mask_lock); pci_disable_busmaster(ntb->device); amd_ntb_unmap_pci_bars(ntb); return (0); } static device_method_t ntb_amd_methods[] = { /* Device interface */ DEVMETHOD(device_probe, amd_ntb_probe), DEVMETHOD(device_attach, amd_ntb_attach), DEVMETHOD(device_detach, amd_ntb_detach), /* Bus interface */ - DEVMETHOD(bus_child_location_str, ntb_child_location_str), + DEVMETHOD(bus_child_location, ntb_child_location), DEVMETHOD(bus_print_child, ntb_print_child), DEVMETHOD(bus_get_dma_tag, ntb_get_dma_tag), /* NTB interface */ DEVMETHOD(ntb_port_number, amd_ntb_port_number), DEVMETHOD(ntb_peer_port_count, amd_ntb_peer_port_count), DEVMETHOD(ntb_peer_port_number, amd_ntb_peer_port_number), DEVMETHOD(ntb_peer_port_idx, amd_ntb_peer_port_idx), DEVMETHOD(ntb_link_is_up, amd_ntb_link_is_up), DEVMETHOD(ntb_link_enable, amd_ntb_link_enable), DEVMETHOD(ntb_link_disable, amd_ntb_link_disable), DEVMETHOD(ntb_mw_count, amd_ntb_mw_count), DEVMETHOD(ntb_mw_get_range, amd_ntb_mw_get_range), DEVMETHOD(ntb_mw_set_trans, amd_ntb_mw_set_trans), DEVMETHOD(ntb_mw_clear_trans, amd_ntb_mw_clear_trans), DEVMETHOD(ntb_mw_set_wc, amd_ntb_mw_set_wc), DEVMETHOD(ntb_mw_get_wc, amd_ntb_mw_get_wc), DEVMETHOD(ntb_db_valid_mask, amd_ntb_db_valid_mask), DEVMETHOD(ntb_db_vector_count, amd_ntb_db_vector_count), DEVMETHOD(ntb_db_vector_mask, amd_ntb_db_vector_mask), DEVMETHOD(ntb_db_read, amd_ntb_db_read), DEVMETHOD(ntb_db_clear, amd_ntb_db_clear), DEVMETHOD(ntb_db_set_mask, amd_ntb_db_set_mask), DEVMETHOD(ntb_db_clear_mask, amd_ntb_db_clear_mask), DEVMETHOD(ntb_peer_db_set, amd_ntb_peer_db_set), DEVMETHOD(ntb_spad_count, amd_ntb_spad_count), DEVMETHOD(ntb_spad_read, amd_ntb_spad_read), DEVMETHOD(ntb_spad_write, amd_ntb_spad_write), DEVMETHOD(ntb_peer_spad_read, amd_ntb_peer_spad_read), DEVMETHOD(ntb_peer_spad_write, amd_ntb_peer_spad_write), DEVMETHOD_END }; static DEFINE_CLASS_0(ntb_hw, ntb_amd_driver, ntb_amd_methods, sizeof(struct amd_ntb_softc)); DRIVER_MODULE(ntb_hw_amd, pci, ntb_amd_driver, ntb_hw_devclass, NULL, NULL); MODULE_DEPEND(ntb_hw_amd, ntb, 1, 1, 1); MODULE_VERSION(ntb_hw_amd, 1); PCI_PNP_INFO(amd_ntb_devs); diff --git a/sys/dev/ntb/ntb_hw/ntb_hw_intel.c b/sys/dev/ntb/ntb_hw/ntb_hw_intel.c index 8eb210efb065..88fc3d6a36a1 100644 --- a/sys/dev/ntb/ntb_hw/ntb_hw_intel.c +++ b/sys/dev/ntb/ntb_hw/ntb_hw_intel.c @@ -1,3521 +1,3521 @@ /*- * Copyright (c) 2016-2017 Alexander Motin * Copyright (C) 2013 Intel Corporation * Copyright (C) 2015 EMC Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * The Non-Transparent Bridge (NTB) is a device that allows you to connect * two or more systems using a PCI-e links, providing remote memory access. * * This module contains a driver for NTB hardware in Intel Xeon/Atom CPUs. * * NOTE: Much of the code in this module is shared with Linux. Any patches may * be picked up and redistributed in Linux with a dual GPL/BSD license. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ntb_hw_intel.h" #include "../ntb.h" #define MAX_MSIX_INTERRUPTS \ MAX(MAX(XEON_DB_COUNT, ATOM_DB_COUNT), XEON_GEN3_DB_COUNT) #define NTB_HB_TIMEOUT 1 /* second */ #define ATOM_LINK_RECOVERY_TIME 500 /* ms */ #define BAR_HIGH_MASK (~((1ull << 12) - 1)) #define NTB_MSIX_VER_GUARD 0xaabbccdd #define NTB_MSIX_RECEIVED 0xe0f0e0f0 /* * PCI constants could be somewhere more generic, but aren't defined/used in * pci.c. */ #define PCI_MSIX_ENTRY_SIZE 16 #define PCI_MSIX_ENTRY_LOWER_ADDR 0 #define PCI_MSIX_ENTRY_UPPER_ADDR 4 #define PCI_MSIX_ENTRY_DATA 8 enum ntb_device_type { NTB_XEON_GEN1, NTB_XEON_GEN3, NTB_ATOM }; /* ntb_conn_type are hardware numbers, cannot change. 
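 * These values match the connection-type field that
 * intel_ntb_detect_xeon() and friends read out of the PPD config
 * register, so the enum cannot be renumbered.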
*/ enum ntb_conn_type { NTB_CONN_TRANSPARENT = 0, NTB_CONN_B2B = 1, NTB_CONN_RP = 2, }; enum ntb_b2b_direction { NTB_DEV_USD = 0, NTB_DEV_DSD = 1, }; enum ntb_bar { NTB_CONFIG_BAR = 0, NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3, NTB_MAX_BARS }; enum { NTB_MSIX_GUARD = 0, NTB_MSIX_DATA0, NTB_MSIX_DATA1, NTB_MSIX_DATA2, NTB_MSIX_OFS0, NTB_MSIX_OFS1, NTB_MSIX_OFS2, NTB_MSIX_DONE, NTB_MAX_MSIX_SPAD }; /* Device features and workarounds */ #define HAS_FEATURE(ntb, feature) \ (((ntb)->features & (feature)) != 0) struct ntb_hw_info { uint32_t device_id; const char *desc; enum ntb_device_type type; uint32_t features; }; struct ntb_pci_bar_info { bus_space_tag_t pci_bus_tag; bus_space_handle_t pci_bus_handle; int pci_resource_id; struct resource *pci_resource; vm_paddr_t pbase; caddr_t vbase; vm_size_t size; vm_memattr_t map_mode; /* Configuration register offsets */ uint32_t psz_off; uint32_t ssz_off; uint32_t pbarxlat_off; }; struct ntb_int_info { struct resource *res; int rid; void *tag; }; struct ntb_vec { struct ntb_softc *ntb; uint32_t num; unsigned masked; }; struct ntb_reg { uint32_t ntb_ctl; uint32_t lnk_sta; uint8_t db_size; unsigned mw_bar[NTB_MAX_BARS]; }; struct ntb_alt_reg { uint32_t db_bell; uint32_t db_mask; uint32_t spad; }; struct ntb_xlat_reg { uint32_t bar0_base; uint32_t bar2_base; uint32_t bar4_base; uint32_t bar5_base; uint32_t bar2_xlat; uint32_t bar4_xlat; uint32_t bar5_xlat; uint32_t bar2_limit; uint32_t bar4_limit; uint32_t bar5_limit; }; struct ntb_b2b_addr { uint64_t bar0_addr; uint64_t bar2_addr64; uint64_t bar4_addr64; uint64_t bar4_addr32; uint64_t bar5_addr32; }; struct ntb_msix_data { uint32_t nmd_ofs; uint32_t nmd_data; }; struct ntb_softc { /* ntb.c context. Do not move! Must go first! */ void *ntb_store; device_t device; enum ntb_device_type type; uint32_t features; struct ntb_pci_bar_info bar_info[NTB_MAX_BARS]; struct ntb_int_info int_info[MAX_MSIX_INTERRUPTS]; uint32_t allocated_interrupts; struct ntb_msix_data peer_msix_data[XEON_NONLINK_DB_MSIX_BITS]; struct ntb_msix_data msix_data[XEON_NONLINK_DB_MSIX_BITS]; bool peer_msix_good; bool peer_msix_done; struct ntb_pci_bar_info *peer_lapic_bar; struct callout peer_msix_work; bus_dma_tag_t bar0_dma_tag; bus_dmamap_t bar0_dma_map; struct callout heartbeat_timer; struct callout lr_timer; struct ntb_vec *msix_vec; uint32_t ppd; enum ntb_conn_type conn_type; enum ntb_b2b_direction dev_type; /* Offset of peer bar0 in B2B BAR */ uint64_t b2b_off; /* Memory window used to access peer bar0 */ #define B2B_MW_DISABLED UINT8_MAX uint8_t b2b_mw_idx; uint32_t msix_xlat; uint8_t msix_mw_idx; uint8_t mw_count; uint8_t spad_count; uint8_t db_count; uint8_t db_vec_count; uint8_t db_vec_shift; /* Protects local db_mask. 
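 * A spin mutex, since the mask is also manipulated from interrupt
 * context. The usual pattern is:
 *
 *	DB_MASK_LOCK(ntb);
 *	ntb->db_mask |= bits;
 *	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
 *	DB_MASK_UNLOCK(ntb);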
*/ #define DB_MASK_LOCK(sc) mtx_lock_spin(&(sc)->db_mask_lock) #define DB_MASK_UNLOCK(sc) mtx_unlock_spin(&(sc)->db_mask_lock) #define DB_MASK_ASSERT(sc,f) mtx_assert(&(sc)->db_mask_lock, (f)) struct mtx db_mask_lock; volatile uint32_t ntb_ctl; volatile uint32_t lnk_sta; uint64_t db_valid_mask; uint64_t db_link_mask; uint64_t db_mask; uint64_t fake_db; /* NTB_SB01BASE_LOCKUP*/ uint64_t force_db; /* NTB_SB01BASE_LOCKUP*/ int last_ts; /* ticks @ last irq */ const struct ntb_reg *reg; const struct ntb_alt_reg *self_reg; const struct ntb_alt_reg *peer_reg; const struct ntb_xlat_reg *xlat_reg; }; #ifdef __i386__ static __inline uint64_t bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t offset) { return (bus_space_read_4(tag, handle, offset) | ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32); } static __inline void bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t offset, uint64_t val) { bus_space_write_4(tag, handle, offset, val); bus_space_write_4(tag, handle, offset + 4, val >> 32); } #endif #define intel_ntb_bar_read(SIZE, bar, offset) \ bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \ ntb->bar_info[(bar)].pci_bus_handle, (offset)) #define intel_ntb_bar_write(SIZE, bar, offset, val) \ bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \ ntb->bar_info[(bar)].pci_bus_handle, (offset), (val)) #define intel_ntb_reg_read(SIZE, offset) \ intel_ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset) #define intel_ntb_reg_write(SIZE, offset, val) \ intel_ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val) #define intel_ntb_mw_read(SIZE, offset) \ intel_ntb_bar_read(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \ offset) #define intel_ntb_mw_write(SIZE, offset, val) \ intel_ntb_bar_write(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \ offset, val) static int intel_ntb_probe(device_t device); static int intel_ntb_attach(device_t device); static int intel_ntb_detach(device_t device); static uint64_t intel_ntb_db_valid_mask(device_t dev); static void intel_ntb_spad_clear(device_t dev); static uint64_t intel_ntb_db_vector_mask(device_t dev, uint32_t vector); static bool intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width); static int intel_ntb_link_enable(device_t dev, enum ntb_speed speed, enum ntb_width width); static int intel_ntb_link_disable(device_t dev); static int intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val); static int intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val); static unsigned intel_ntb_user_mw_to_idx(struct ntb_softc *, unsigned uidx); static inline enum ntb_bar intel_ntb_mw_to_bar(struct ntb_softc *, unsigned mw); static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar); static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar, uint32_t *base, uint32_t *xlat, uint32_t *lmt); static int intel_ntb_map_pci_bars(struct ntb_softc *ntb); static int intel_ntb_mw_set_wc_internal(struct ntb_softc *, unsigned idx, vm_memattr_t); static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *, const char *); static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar); static int map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar); static void intel_ntb_unmap_pci_bar(struct ntb_softc *ntb); static int intel_ntb_remap_msix(device_t, uint32_t desired, uint32_t avail); static int intel_ntb_init_isr(struct ntb_softc *ntb); static int intel_ntb_xeon_gen3_init_isr(struct ntb_softc 
*ntb); static int intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb); static int intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors); static void intel_ntb_teardown_interrupts(struct ntb_softc *ntb); static inline uint64_t intel_ntb_vec_mask(struct ntb_softc *, uint64_t db_vector); static void intel_ntb_interrupt(struct ntb_softc *, uint32_t vec); static void ndev_vec_isr(void *arg); static void ndev_irq_isr(void *arg); static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff); static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t); static inline void db_iowrite_raw(struct ntb_softc *, uint64_t regoff, uint64_t); static int intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors); static void intel_ntb_free_msix_vec(struct ntb_softc *ntb); static void intel_ntb_get_msix_info(struct ntb_softc *ntb); static void intel_ntb_exchange_msix(void *); static struct ntb_hw_info *intel_ntb_get_device_info(uint32_t device_id); static void intel_ntb_detect_max_mw(struct ntb_softc *ntb); static int intel_ntb_detect_xeon(struct ntb_softc *ntb); static int intel_ntb_detect_xeon_gen3(struct ntb_softc *ntb); static int intel_ntb_detect_atom(struct ntb_softc *ntb); static int intel_ntb_xeon_init_dev(struct ntb_softc *ntb); static int intel_ntb_xeon_gen3_init_dev(struct ntb_softc *ntb); static int intel_ntb_atom_init_dev(struct ntb_softc *ntb); static void intel_ntb_teardown_xeon(struct ntb_softc *ntb); static void configure_atom_secondary_side_bars(struct ntb_softc *ntb); static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx, enum ntb_bar regbar); static void xeon_set_sbar_base_and_limit(struct ntb_softc *, uint64_t base_addr, enum ntb_bar idx, enum ntb_bar regbar); static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr, enum ntb_bar idx); static int xeon_setup_b2b_mw(struct ntb_softc *, const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr); static int xeon_gen3_setup_b2b_mw(struct ntb_softc *); static int intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size); static inline bool link_is_up(struct ntb_softc *ntb); static inline bool _xeon_link_is_up(struct ntb_softc *ntb); static inline bool atom_link_is_err(struct ntb_softc *ntb); static inline enum ntb_speed intel_ntb_link_sta_speed(struct ntb_softc *); static inline enum ntb_width intel_ntb_link_sta_width(struct ntb_softc *); static void atom_link_hb(void *arg); static void recover_atom_link(void *arg); static bool intel_ntb_poll_link(struct ntb_softc *ntb); static void save_bar_parameters(struct ntb_pci_bar_info *bar); static void intel_ntb_sysctl_init(struct ntb_softc *); static int sysctl_handle_features(SYSCTL_HANDLER_ARGS); static int sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS); static int sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS); static int sysctl_handle_link_status(SYSCTL_HANDLER_ARGS); static int sysctl_handle_register(SYSCTL_HANDLER_ARGS); static unsigned g_ntb_hw_debug_level; SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ntb_hw_debug_level, 0, "ntb_hw log level -- higher is more verbose"); #define intel_ntb_printf(lvl, ...) 
do { \ if ((lvl) <= g_ntb_hw_debug_level) { \ device_printf(ntb->device, __VA_ARGS__); \ } \ } while (0) #define _NTB_PAT_UC 0 #define _NTB_PAT_WC 1 #define _NTB_PAT_WT 4 #define _NTB_PAT_WP 5 #define _NTB_PAT_WB 6 #define _NTB_PAT_UCM 7 static unsigned g_ntb_mw_pat = _NTB_PAT_UC; SYSCTL_UINT(_hw_ntb, OID_AUTO, default_mw_pat, CTLFLAG_RDTUN, &g_ntb_mw_pat, 0, "Configure the default memory window cache flags (PAT): " "UC: " __XSTRING(_NTB_PAT_UC) ", " "WC: " __XSTRING(_NTB_PAT_WC) ", " "WT: " __XSTRING(_NTB_PAT_WT) ", " "WP: " __XSTRING(_NTB_PAT_WP) ", " "WB: " __XSTRING(_NTB_PAT_WB) ", " "UC-: " __XSTRING(_NTB_PAT_UCM)); static inline vm_memattr_t intel_ntb_pat_flags(void) { switch (g_ntb_mw_pat) { case _NTB_PAT_WC: return (VM_MEMATTR_WRITE_COMBINING); case _NTB_PAT_WT: return (VM_MEMATTR_WRITE_THROUGH); case _NTB_PAT_WP: return (VM_MEMATTR_WRITE_PROTECTED); case _NTB_PAT_WB: return (VM_MEMATTR_WRITE_BACK); case _NTB_PAT_UCM: return (VM_MEMATTR_WEAK_UNCACHEABLE); case _NTB_PAT_UC: /* FALLTHROUGH */ default: return (VM_MEMATTR_UNCACHEABLE); } } /* * Well, this obviously doesn't belong here, but it doesn't seem to exist * anywhere better yet. */ static inline const char * intel_ntb_vm_memattr_to_str(vm_memattr_t pat) { switch (pat) { case VM_MEMATTR_WRITE_COMBINING: return ("WRITE_COMBINING"); case VM_MEMATTR_WRITE_THROUGH: return ("WRITE_THROUGH"); case VM_MEMATTR_WRITE_PROTECTED: return ("WRITE_PROTECTED"); case VM_MEMATTR_WRITE_BACK: return ("WRITE_BACK"); case VM_MEMATTR_WEAK_UNCACHEABLE: return ("UNCACHED"); case VM_MEMATTR_UNCACHEABLE: return ("UNCACHEABLE"); default: return ("UNKNOWN"); } } static int g_ntb_msix_idx = 1; SYSCTL_INT(_hw_ntb, OID_AUTO, msix_mw_idx, CTLFLAG_RDTUN, &g_ntb_msix_idx, 0, "Use this memory window to access the peer MSIX message complex on " "certain Xeon-based NTB systems, as a workaround for a hardware errata. " "Like b2b_mw_idx, negative values index from the last available memory " "window. (Applies on Xeon platforms with SB01BASE_LOCKUP errata.)"); static int g_ntb_mw_idx = -1; SYSCTL_INT(_hw_ntb, OID_AUTO, b2b_mw_idx, CTLFLAG_RDTUN, &g_ntb_mw_idx, 0, "Use this memory window to access the peer NTB registers. A " "non-negative value starts from the first MW index; a negative value " "starts from the last MW index. The default is -1, i.e., the last " "available memory window. Both sides of the NTB MUST set the same " "value here! (Applies on Xeon platforms with SDOORBELL_LOCKUP errata.)"); /* Hardware owns the low 16 bits of features. */ #define NTB_BAR_SIZE_4K (1 << 0) #define NTB_SDOORBELL_LOCKUP (1 << 1) #define NTB_SB01BASE_LOCKUP (1 << 2) #define NTB_B2BDOORBELL_BIT14 (1 << 3) /* Software/configuration owns the top 16 bits. */ #define NTB_SPLIT_BAR (1ull << 16) #define NTB_ONE_MSIX (1ull << 17) #define NTB_FEATURES_STR \ "\20\21SPLIT_BAR4\04B2B_DOORBELL_BIT14\03SB01BASE_LOCKUP" \ "\02SDOORBELL_LOCKUP\01BAR_SIZE_4K" static struct ntb_hw_info pci_ids[] = { /* XXX: PS/SS IDs left out until they are supported. 
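 * Each device_id below is matched against pci_get_devid(), which packs
 * the PCI device ID into the upper 16 bits and the vendor ID into the
 * lower 16; e.g. 0x37258086 is device 0x3725, vendor 0x8086 (Intel).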
*/ { 0x0C4E8086, "BWD Atom Processor S1200 Non-Transparent Bridge B2B", NTB_ATOM, 0 }, { 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B", NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 }, { 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B", NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 }, { 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B", NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP | NTB_BAR_SIZE_4K }, { 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B", NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP }, { 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B", NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP }, { 0x201C8086, "SKL Xeon E5 V5 Non-Transparent Bridge B2B", NTB_XEON_GEN3, 0 }, }; static const struct ntb_reg atom_reg = { .ntb_ctl = ATOM_NTBCNTL_OFFSET, .lnk_sta = ATOM_LINK_STATUS_OFFSET, .db_size = sizeof(uint64_t), .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 }, }; static const struct ntb_alt_reg atom_pri_reg = { .db_bell = ATOM_PDOORBELL_OFFSET, .db_mask = ATOM_PDBMSK_OFFSET, .spad = ATOM_SPAD_OFFSET, }; static const struct ntb_alt_reg atom_b2b_reg = { .db_bell = ATOM_B2B_DOORBELL_OFFSET, .spad = ATOM_B2B_SPAD_OFFSET, }; static const struct ntb_xlat_reg atom_sec_xlat = { #if 0 /* "FIXME" says the Linux driver. */ .bar0_base = ATOM_SBAR0BASE_OFFSET, .bar2_base = ATOM_SBAR2BASE_OFFSET, .bar4_base = ATOM_SBAR4BASE_OFFSET, .bar2_limit = ATOM_SBAR2LMT_OFFSET, .bar4_limit = ATOM_SBAR4LMT_OFFSET, #endif .bar2_xlat = ATOM_SBAR2XLAT_OFFSET, .bar4_xlat = ATOM_SBAR4XLAT_OFFSET, }; static const struct ntb_reg xeon_reg = { .ntb_ctl = XEON_NTBCNTL_OFFSET, .lnk_sta = XEON_LINK_STATUS_OFFSET, .db_size = sizeof(uint16_t), .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3 }, }; static const struct ntb_alt_reg xeon_pri_reg = { .db_bell = XEON_PDOORBELL_OFFSET, .db_mask = XEON_PDBMSK_OFFSET, .spad = XEON_SPAD_OFFSET, }; static const struct ntb_alt_reg xeon_b2b_reg = { .db_bell = XEON_B2B_DOORBELL_OFFSET, .spad = XEON_B2B_SPAD_OFFSET, }; static const struct ntb_xlat_reg xeon_sec_xlat = { .bar0_base = XEON_SBAR0BASE_OFFSET, .bar2_base = XEON_SBAR2BASE_OFFSET, .bar4_base = XEON_SBAR4BASE_OFFSET, .bar5_base = XEON_SBAR5BASE_OFFSET, .bar2_limit = XEON_SBAR2LMT_OFFSET, .bar4_limit = XEON_SBAR4LMT_OFFSET, .bar5_limit = XEON_SBAR5LMT_OFFSET, .bar2_xlat = XEON_SBAR2XLAT_OFFSET, .bar4_xlat = XEON_SBAR4XLAT_OFFSET, .bar5_xlat = XEON_SBAR5XLAT_OFFSET, }; static struct ntb_b2b_addr xeon_b2b_usd_addr = { .bar0_addr = XEON_B2B_BAR0_ADDR, .bar2_addr64 = XEON_B2B_BAR2_ADDR64, .bar4_addr64 = XEON_B2B_BAR4_ADDR64, .bar4_addr32 = XEON_B2B_BAR4_ADDR32, .bar5_addr32 = XEON_B2B_BAR5_ADDR32, }; static struct ntb_b2b_addr xeon_b2b_dsd_addr = { .bar0_addr = XEON_B2B_BAR0_ADDR, .bar2_addr64 = XEON_B2B_BAR2_ADDR64, .bar4_addr64 = XEON_B2B_BAR4_ADDR64, .bar4_addr32 = XEON_B2B_BAR4_ADDR32, .bar5_addr32 = XEON_B2B_BAR5_ADDR32, }; static const struct ntb_reg xeon_gen3_reg = { .ntb_ctl = XEON_GEN3_REG_IMNTB_CTRL, .lnk_sta = XEON_GEN3_INT_LNK_STS_OFFSET, .db_size = sizeof(uint32_t), .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 }, }; static const struct ntb_alt_reg xeon_gen3_pri_reg = { .db_bell = XEON_GEN3_REG_EMDOORBELL, .db_mask = XEON_GEN3_REG_IMINT_DISABLE, .spad = XEON_GEN3_REG_IMSPAD, }; static const struct ntb_alt_reg xeon_gen3_b2b_reg = { .db_bell = XEON_GEN3_REG_IMDOORBELL, .db_mask = XEON_GEN3_REG_EMINT_DISABLE, .spad = XEON_GEN3_REG_IMB2B_SSPAD, }; 
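/*
 * Each hardware generation supplies its own set of register maps; the
 * per-generation init code simply points the softc at the right ones,
 * e.g. (from intel_ntb_xeon_gen3_init_dev() below):
 *
 *	ntb->reg = &xeon_gen3_reg;
 *	ntb->self_reg = &xeon_gen3_pri_reg;
 *	ntb->peer_reg = &xeon_gen3_b2b_reg;
 *	ntb->xlat_reg = &xeon_gen3_sec_xlat;
 *
 * after which generic helpers such as db_ioread()/db_iowrite() work
 * unchanged across Atom, Xeon gen1 and Xeon gen3.
 */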
static const struct ntb_xlat_reg xeon_gen3_sec_xlat = { .bar0_base = XEON_GEN3_EXT_REG_BAR0BASE, .bar2_base = XEON_GEN3_EXT_REG_BAR1BASE, .bar4_base = XEON_GEN3_EXT_REG_BAR2BASE, .bar2_limit = XEON_GEN3_REG_IMBAR1XLIMIT, .bar4_limit = XEON_GEN3_REG_IMBAR2XLIMIT, .bar2_xlat = XEON_GEN3_REG_IMBAR1XBASE, .bar4_xlat = XEON_GEN3_REG_IMBAR2XBASE, }; SYSCTL_NODE(_hw_ntb, OID_AUTO, xeon_b2b, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "B2B MW segment overrides -- MUST be the same on both sides"); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar2_addr64, CTLFLAG_RDTUN, &xeon_b2b_usd_addr.bar2_addr64, 0, "If using B2B topology on Xeon " "hardware, use this 64-bit address on the bus between the NTB devices for " "the window at BAR2, on the upstream side of the link. MUST be the same " "address on both sides."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr64, CTLFLAG_RDTUN, &xeon_b2b_usd_addr.bar4_addr64, 0, "See usd_bar2_addr64, but BAR4."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr32, CTLFLAG_RDTUN, &xeon_b2b_usd_addr.bar4_addr32, 0, "See usd_bar2_addr64, but BAR4 " "(split-BAR mode)."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar5_addr32, CTLFLAG_RDTUN, &xeon_b2b_usd_addr.bar5_addr32, 0, "See usd_bar2_addr64, but BAR5 " "(split-BAR mode)."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar2_addr64, CTLFLAG_RDTUN, &xeon_b2b_dsd_addr.bar2_addr64, 0, "If using B2B topology on Xeon " "hardware, use this 64-bit address on the bus between the NTB devices for " "the window at BAR2, on the downstream side of the link. MUST be the same" " address on both sides."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr64, CTLFLAG_RDTUN, &xeon_b2b_dsd_addr.bar4_addr64, 0, "See dsd_bar2_addr64, but BAR4."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr32, CTLFLAG_RDTUN, &xeon_b2b_dsd_addr.bar4_addr32, 0, "See dsd_bar2_addr64, but BAR4 " "(split-BAR mode)."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar5_addr32, CTLFLAG_RDTUN, &xeon_b2b_dsd_addr.bar5_addr32, 0, "See dsd_bar2_addr64, but BAR5 " "(split-BAR mode)."); /* * OS <-> Driver interface structures */ MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations"); /* * OS <-> Driver linkage functions */ static int intel_ntb_probe(device_t device) { struct ntb_hw_info *p; p = intel_ntb_get_device_info(pci_get_devid(device)); if (p == NULL) return (ENXIO); device_set_desc(device, p->desc); return (0); } static int intel_ntb_attach(device_t device) { struct ntb_softc *ntb; struct ntb_hw_info *p; int error; ntb = device_get_softc(device); p = intel_ntb_get_device_info(pci_get_devid(device)); ntb->device = device; ntb->type = p->type; ntb->features = p->features; ntb->b2b_mw_idx = B2B_MW_DISABLED; ntb->msix_mw_idx = B2B_MW_DISABLED; /* Heartbeat timer for NTB_ATOM since there is no link interrupt */ callout_init(&ntb->heartbeat_timer, 1); callout_init(&ntb->lr_timer, 1); callout_init(&ntb->peer_msix_work, 1); mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN); if (ntb->type == NTB_ATOM) error = intel_ntb_detect_atom(ntb); else if (ntb->type == NTB_XEON_GEN3) error = intel_ntb_detect_xeon_gen3(ntb); else error = intel_ntb_detect_xeon(ntb); if (error != 0) goto out; intel_ntb_detect_max_mw(ntb); pci_enable_busmaster(ntb->device); error = intel_ntb_map_pci_bars(ntb); if (error != 0) goto out; if (ntb->type == NTB_ATOM) error = intel_ntb_atom_init_dev(ntb); else if (ntb->type == NTB_XEON_GEN3) error = intel_ntb_xeon_gen3_init_dev(ntb); else error = intel_ntb_xeon_init_dev(ntb); if (error != 0) goto out; 
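	/* Wipe any stale scratchpad contents before the link comes up. */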
intel_ntb_spad_clear(device); intel_ntb_poll_link(ntb); intel_ntb_sysctl_init(ntb); /* Attach children to this controller */ error = ntb_register_device(device); out: if (error != 0) intel_ntb_detach(device); return (error); } static int intel_ntb_detach(device_t device) { struct ntb_softc *ntb; ntb = device_get_softc(device); /* Detach & delete all children */ ntb_unregister_device(device); if (ntb->self_reg != NULL) { DB_MASK_LOCK(ntb); db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_valid_mask); DB_MASK_UNLOCK(ntb); } callout_drain(&ntb->heartbeat_timer); callout_drain(&ntb->lr_timer); callout_drain(&ntb->peer_msix_work); pci_disable_busmaster(ntb->device); if (ntb->type == NTB_XEON_GEN1) intel_ntb_teardown_xeon(ntb); intel_ntb_teardown_interrupts(ntb); mtx_destroy(&ntb->db_mask_lock); intel_ntb_unmap_pci_bar(ntb); return (0); } /* * Driver internal routines */ static inline enum ntb_bar intel_ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw) { KASSERT(mw < ntb->mw_count, ("%s: mw:%u > count:%u", __func__, mw, (unsigned)ntb->mw_count)); KASSERT(ntb->reg->mw_bar[mw] != 0, ("invalid mw")); return (ntb->reg->mw_bar[mw]); } static inline bool bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar) { /* XXX This assertion could be stronger. */ KASSERT(bar < NTB_MAX_BARS, ("bogus bar")); return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(ntb, NTB_SPLIT_BAR)); } static inline void bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base, uint32_t *xlat, uint32_t *lmt) { uint32_t basev, lmtv, xlatv; switch (bar) { case NTB_B2B_BAR_1: basev = ntb->xlat_reg->bar2_base; lmtv = ntb->xlat_reg->bar2_limit; xlatv = ntb->xlat_reg->bar2_xlat; break; case NTB_B2B_BAR_2: basev = ntb->xlat_reg->bar4_base; lmtv = ntb->xlat_reg->bar4_limit; xlatv = ntb->xlat_reg->bar4_xlat; break; case NTB_B2B_BAR_3: basev = ntb->xlat_reg->bar5_base; lmtv = ntb->xlat_reg->bar5_limit; xlatv = ntb->xlat_reg->bar5_xlat; break; default: KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS, ("bad bar")); basev = lmtv = xlatv = 0; break; } if (base != NULL) *base = basev; if (xlat != NULL) *xlat = xlatv; if (lmt != NULL) *lmt = lmtv; } static int intel_ntb_map_pci_bars(struct ntb_softc *ntb) { struct ntb_pci_bar_info *bar; int rc; bar = &ntb->bar_info[NTB_CONFIG_BAR]; bar->pci_resource_id = PCIR_BAR(0); rc = map_mmr_bar(ntb, bar); if (rc != 0) goto out; /* * At least on Xeon v4 NTB device leaks to host some remote side * BAR0 writes supposed to update scratchpad registers. I am not * sure why it happens, but it may be related to the fact that * on a link side BAR0 is 32KB, while on a host side it is 64KB. * Without this hack DMAR blocks those accesses as not allowed. 
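	 * The bus_dma_iommu_load_ident() call below installs an identity
	 * (1:1) mapping for BAR0's physical range so that those stray
	 * peer writes pass the IOMMU instead of faulting.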
*/ if (bus_dma_tag_create(bus_get_dma_tag(ntb->device), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, bar->size, 1, bar->size, 0, NULL, NULL, &ntb->bar0_dma_tag)) { device_printf(ntb->device, "Unable to create BAR0 tag\n"); return (ENOMEM); } if (bus_dmamap_create(ntb->bar0_dma_tag, 0, &ntb->bar0_dma_map)) { device_printf(ntb->device, "Unable to create BAR0 map\n"); return (ENOMEM); } if (bus_dma_iommu_load_ident(ntb->bar0_dma_tag, ntb->bar0_dma_map, bar->pbase, bar->size, 0)) { device_printf(ntb->device, "Unable to load BAR0 map\n"); return (ENOMEM); } bar = &ntb->bar_info[NTB_B2B_BAR_1]; bar->pci_resource_id = PCIR_BAR(2); rc = map_memory_window_bar(ntb, bar); if (rc != 0) goto out; if (ntb->type == NTB_XEON_GEN3) { bar->psz_off = XEON_GEN3_INT_REG_IMBAR1SZ; bar->ssz_off = XEON_GEN3_INT_REG_EMBAR1SZ; bar->pbarxlat_off = XEON_GEN3_REG_EMBAR1XBASE; } else { bar->psz_off = XEON_PBAR23SZ_OFFSET; bar->ssz_off = XEON_SBAR23SZ_OFFSET; bar->pbarxlat_off = XEON_PBAR2XLAT_OFFSET; } bar = &ntb->bar_info[NTB_B2B_BAR_2]; bar->pci_resource_id = PCIR_BAR(4); rc = map_memory_window_bar(ntb, bar); if (rc != 0) goto out; if (ntb->type == NTB_XEON_GEN3) { bar->psz_off = XEON_GEN3_INT_REG_IMBAR2SZ; bar->ssz_off = XEON_GEN3_INT_REG_EMBAR2SZ; bar->pbarxlat_off = XEON_GEN3_REG_EMBAR2XBASE; } else { bar->psz_off = XEON_PBAR4SZ_OFFSET; bar->ssz_off = XEON_SBAR4SZ_OFFSET; bar->pbarxlat_off = XEON_PBAR4XLAT_OFFSET; } if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR)) goto out; if (ntb->type == NTB_XEON_GEN3) { device_printf(ntb->device, "no split bar support\n"); return (ENXIO); } bar = &ntb->bar_info[NTB_B2B_BAR_3]; bar->pci_resource_id = PCIR_BAR(5); rc = map_memory_window_bar(ntb, bar); bar->psz_off = XEON_PBAR5SZ_OFFSET; bar->ssz_off = XEON_SBAR5SZ_OFFSET; bar->pbarxlat_off = XEON_PBAR5XLAT_OFFSET; out: if (rc != 0) device_printf(ntb->device, "unable to allocate pci resource\n"); return (rc); } static void print_map_success(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar, const char *kind) { device_printf(ntb->device, "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n", PCI_RID2BAR(bar->pci_resource_id), bar->vbase, (char *)bar->vbase + bar->size - 1, (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1), (uintmax_t)bar->size, kind); } static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar) { bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, &bar->pci_resource_id, RF_ACTIVE); if (bar->pci_resource == NULL) return (ENXIO); save_bar_parameters(bar); bar->map_mode = VM_MEMATTR_UNCACHEABLE; print_map_success(ntb, bar, "mmr"); return (0); } static int map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar) { int rc; vm_memattr_t mapmode; uint8_t bar_size_bits = 0; bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, &bar->pci_resource_id, RF_ACTIVE); if (bar->pci_resource == NULL) return (ENXIO); save_bar_parameters(bar); /* * Ivytown NTB BAR sizes are misreported by the hardware due to a * hardware issue. To work around this, query the size it should be * configured to by the device and modify the resource to correspond to * this new size. The BIOS on systems with this problem is required to * provide enough address space to allow the driver to make this change * safely. 
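	 * The PBARxSZ config registers report the size as a power of two,
	 * so the corrected window is 1ul << bar_size_bits bytes; e.g. a
	 * register value of 17 means a 128KB BAR.
	 *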
* * Ideally I could have just specified the size when I allocated the * resource like: * bus_alloc_resource(ntb->device, * SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul, * 1ul << bar_size_bits, RF_ACTIVE); * but the PCI driver does not honor the size in this call, so we have * to modify it after the fact. */ if (HAS_FEATURE(ntb, NTB_BAR_SIZE_4K)) { if (bar->pci_resource_id == PCIR_BAR(2)) bar_size_bits = pci_read_config(ntb->device, XEON_PBAR23SZ_OFFSET, 1); else bar_size_bits = pci_read_config(ntb->device, XEON_PBAR45SZ_OFFSET, 1); rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY, bar->pci_resource, bar->pbase, bar->pbase + (1ul << bar_size_bits) - 1); if (rc != 0) { device_printf(ntb->device, "unable to resize bar\n"); return (rc); } save_bar_parameters(bar); } bar->map_mode = VM_MEMATTR_UNCACHEABLE; print_map_success(ntb, bar, "mw"); /* * Optionally, mark MW BARs as anything other than UC to improve * performance. */ mapmode = intel_ntb_pat_flags(); if (mapmode == bar->map_mode) return (0); rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mapmode); if (rc == 0) { bar->map_mode = mapmode; device_printf(ntb->device, "Marked BAR%d v:[%p-%p] p:[%p-%p] as " "%s.\n", PCI_RID2BAR(bar->pci_resource_id), bar->vbase, (char *)bar->vbase + bar->size - 1, (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1), intel_ntb_vm_memattr_to_str(mapmode)); } else device_printf(ntb->device, "Unable to mark BAR%d v:[%p-%p] p:[%p-%p] as " "%s: %d\n", PCI_RID2BAR(bar->pci_resource_id), bar->vbase, (char *)bar->vbase + bar->size - 1, (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1), intel_ntb_vm_memattr_to_str(mapmode), rc); /* Proceed anyway */ return (0); } static void intel_ntb_unmap_pci_bar(struct ntb_softc *ntb) { struct ntb_pci_bar_info *bar; int i; if (ntb->bar0_dma_map != NULL) { bus_dmamap_unload(ntb->bar0_dma_tag, ntb->bar0_dma_map); bus_dmamap_destroy(ntb->bar0_dma_tag, ntb->bar0_dma_map); } if (ntb->bar0_dma_tag != NULL) bus_dma_tag_destroy(ntb->bar0_dma_tag); for (i = 0; i < NTB_MAX_BARS; i++) { bar = &ntb->bar_info[i]; if (bar->pci_resource != NULL) bus_release_resource(ntb->device, SYS_RES_MEMORY, bar->pci_resource_id, bar->pci_resource); } } static int intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors) { uint32_t i; int rc; for (i = 0; i < num_vectors; i++) { ntb->int_info[i].rid = i + 1; ntb->int_info[i].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE); if (ntb->int_info[i].res == NULL) { device_printf(ntb->device, "bus_alloc_resource failed\n"); return (ENOMEM); } ntb->int_info[i].tag = NULL; ntb->allocated_interrupts++; rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_vec_isr, &ntb->msix_vec[i], &ntb->int_info[i].tag); if (rc != 0) { device_printf(ntb->device, "bus_setup_intr failed\n"); return (ENXIO); } } return (0); } /* * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector * cannot be allocated for each MSI-X message. JHB seems to think remapping * should be okay. This tunable should enable us to test that hypothesis * when someone gets their hands on some Xeon hardware. */ static int ntb_force_remap_mode; SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN, &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped" " to a smaller number of ithreads, even if the desired number are " "available"); /* * In case it is NOT ok, give consumers an abort button. 
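 * Setting the hw.ntb.prefer_intx_to_remap tunable makes
 * intel_ntb_remap_msix() bail out with ENXIO, which sends the driver
 * down the single-vector/INTx path instead.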
*/ static int ntb_prefer_intx; SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN, &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather " "than remapping MSI-X messages over available slots (match Linux driver " "behavior)"); /* * Remap the desired number of MSI-X messages to available ithreads in a simple * round-robin fashion. */ static int intel_ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail) { u_int *vectors; uint32_t i; int rc; if (ntb_prefer_intx != 0) return (ENXIO); vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK); for (i = 0; i < desired; i++) vectors[i] = (i % avail) + 1; rc = pci_remap_msix(dev, desired, vectors); free(vectors, M_NTB); return (rc); } static int intel_ntb_xeon_gen3_init_isr(struct ntb_softc *ntb) { uint64_t i, reg; uint32_t desired_vectors, num_vectors; int rc; ntb->allocated_interrupts = 0; ntb->last_ts = ticks; /* Mask all the interrupts, including hardware interrupt */ intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_DISABLE, ~0ULL); /* Clear Interrupt Status */ reg = intel_ntb_reg_read(8, XEON_GEN3_REG_IMINT_STATUS); intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_STATUS, reg); num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device), XEON_GEN3_DB_MSIX_VECTOR_COUNT); rc = pci_alloc_msix(ntb->device, &num_vectors); if (rc != 0) { device_printf(ntb->device, "Interrupt allocation failed %d\n", rc); return (rc); } if (desired_vectors != num_vectors) { device_printf(ntb->device, "Couldn't get %d vectors\n", XEON_GEN3_DB_MSIX_VECTOR_COUNT); return (ENXIO); } /* 32 db + 1 hardware */ if (num_vectors == XEON_GEN3_DB_MSIX_VECTOR_COUNT) { /* Program INTVECXX source register */ for (i = 0; i < XEON_GEN3_DB_MSIX_VECTOR_COUNT; i++) { /* interrupt source i for vector i */ intel_ntb_reg_write(1, XEON_GEN3_REG_IMINTVEC00 + i, i); if (i == (XEON_GEN3_DB_MSIX_VECTOR_COUNT - 1)) { intel_ntb_reg_write(1, XEON_GEN3_REG_IMINTVEC00 + i, XEON_GEN3_LINK_VECTOR_INDEX); } } intel_ntb_create_msix_vec(ntb, num_vectors); rc = intel_ntb_setup_msix(ntb, num_vectors); /* enable all interrupts */ intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_DISABLE, 0ULL); } else { device_printf(ntb->device, "need to remap interrupts, giving up.\n"); return (ENXIO); } return (0); } static int intel_ntb_init_isr(struct ntb_softc *ntb) { uint32_t desired_vectors, num_vectors; int rc; ntb->allocated_interrupts = 0; ntb->last_ts = ticks; /* * Mask all doorbell interrupts. (Except link events!) 
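	 * Consumers unmask the doorbell bits they care about later via
	 * ntb_db_clear_mask(); on Xeon gen1 the link-event bit lives
	 * outside db_valid_mask, so it keeps firing regardless.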
*/ DB_MASK_LOCK(ntb); ntb->db_mask = ntb->db_valid_mask; db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask); DB_MASK_UNLOCK(ntb); num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device), ntb->db_count); if (desired_vectors >= 1) { rc = pci_alloc_msix(ntb->device, &num_vectors); if (ntb_force_remap_mode != 0 && rc == 0 && num_vectors == desired_vectors) num_vectors--; if (rc == 0 && num_vectors < desired_vectors) { rc = intel_ntb_remap_msix(ntb->device, desired_vectors, num_vectors); if (rc == 0) num_vectors = desired_vectors; else pci_release_msi(ntb->device); } if (rc != 0) num_vectors = 1; } else num_vectors = 1; if (ntb->type == NTB_XEON_GEN1 && num_vectors < ntb->db_vec_count) { if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) { device_printf(ntb->device, "Errata workaround does not support MSI or INTX\n"); return (EINVAL); } ntb->db_vec_count = 1; ntb->db_vec_shift = XEON_DB_TOTAL_SHIFT; rc = intel_ntb_setup_legacy_interrupt(ntb); } else { if (num_vectors - 1 != XEON_NONLINK_DB_MSIX_BITS && HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) { device_printf(ntb->device, "Errata workaround expects %d doorbell bits\n", XEON_NONLINK_DB_MSIX_BITS); return (EINVAL); } intel_ntb_create_msix_vec(ntb, num_vectors); rc = intel_ntb_setup_msix(ntb, num_vectors); } if (rc != 0) { device_printf(ntb->device, "Error allocating interrupts: %d\n", rc); intel_ntb_free_msix_vec(ntb); } return (rc); } static int intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb) { int rc; ntb->int_info[0].rid = 0; ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ, &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE); if (ntb->int_info[0].res == NULL) { device_printf(ntb->device, "bus_alloc_resource failed\n"); return (ENOMEM); } ntb->int_info[0].tag = NULL; ntb->allocated_interrupts = 1; rc = bus_setup_intr(ntb->device, ntb->int_info[0].res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_irq_isr, ntb, &ntb->int_info[0].tag); if (rc != 0) { device_printf(ntb->device, "bus_setup_intr failed\n"); return (ENXIO); } return (0); } static void intel_ntb_teardown_interrupts(struct ntb_softc *ntb) { struct ntb_int_info *current_int; int i; for (i = 0; i < ntb->allocated_interrupts; i++) { current_int = &ntb->int_info[i]; if (current_int->tag != NULL) bus_teardown_intr(ntb->device, current_int->res, current_int->tag); if (current_int->res != NULL) bus_release_resource(ntb->device, SYS_RES_IRQ, rman_get_rid(current_int->res), current_int->res); } intel_ntb_free_msix_vec(ntb); pci_release_msi(ntb->device); } static inline uint64_t db_ioread(struct ntb_softc *ntb, uint64_t regoff) { switch (ntb->type) { case NTB_ATOM: case NTB_XEON_GEN3: return (intel_ntb_reg_read(8, regoff)); case NTB_XEON_GEN1: return (intel_ntb_reg_read(2, regoff)); } __assert_unreachable(); } static inline void db_iowrite(struct ntb_softc *ntb, uint64_t regoff, uint64_t val) { KASSERT((val & ~ntb->db_valid_mask) == 0, ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__, (uintmax_t)(val & ~ntb->db_valid_mask), (uintmax_t)ntb->db_valid_mask)); if (regoff == ntb->self_reg->db_mask) DB_MASK_ASSERT(ntb, MA_OWNED); db_iowrite_raw(ntb, regoff, val); } static inline void db_iowrite_raw(struct ntb_softc *ntb, uint64_t regoff, uint64_t val) { switch (ntb->type) { case NTB_ATOM: case NTB_XEON_GEN3: intel_ntb_reg_write(8, regoff, val); break; case NTB_XEON_GEN1: intel_ntb_reg_write(2, regoff, (uint16_t)val); break; } } static void intel_ntb_db_set_mask(device_t dev, uint64_t bits) { struct ntb_softc *ntb = device_get_softc(dev); DB_MASK_LOCK(ntb); ntb->db_mask |= bits; 
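	/*
	 * Under the SB01BASE_LOCKUP workaround the hardware mask register
	 * is never written; masking is emulated in software with db_mask
	 * and the fake_db/force_db shadow bits.
	 */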
if (!HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask); DB_MASK_UNLOCK(ntb); } static void intel_ntb_db_clear_mask(device_t dev, uint64_t bits) { struct ntb_softc *ntb = device_get_softc(dev); uint64_t ibits; int i; KASSERT((bits & ~ntb->db_valid_mask) == 0, ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__, (uintmax_t)(bits & ~ntb->db_valid_mask), (uintmax_t)ntb->db_valid_mask)); DB_MASK_LOCK(ntb); ibits = ntb->fake_db & ntb->db_mask & bits; ntb->db_mask &= ~bits; if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) { /* Simulate fake interrupts if unmasked DB bits are set. */ ntb->force_db |= ibits; for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) { if ((ibits & intel_ntb_db_vector_mask(dev, i)) != 0) swi_sched(ntb->int_info[i].tag, 0); } } else { db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask); } DB_MASK_UNLOCK(ntb); } static uint64_t intel_ntb_db_read(device_t dev) { struct ntb_softc *ntb = device_get_softc(dev); if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) return (ntb->fake_db); if (ntb->type == NTB_XEON_GEN3) return (intel_ntb_reg_read(8, XEON_GEN3_REG_IMINT_STATUS)); else return (db_ioread(ntb, ntb->self_reg->db_bell)); } static void intel_ntb_db_clear(device_t dev, uint64_t bits) { struct ntb_softc *ntb = device_get_softc(dev); KASSERT((bits & ~ntb->db_valid_mask) == 0, ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__, (uintmax_t)(bits & ~ntb->db_valid_mask), (uintmax_t)ntb->db_valid_mask)); if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) { DB_MASK_LOCK(ntb); ntb->fake_db &= ~bits; DB_MASK_UNLOCK(ntb); return; } if (ntb->type == NTB_XEON_GEN3) intel_ntb_reg_write(4, XEON_GEN3_REG_IMINT_STATUS, (uint32_t)bits); else db_iowrite(ntb, ntb->self_reg->db_bell, bits); } static inline uint64_t intel_ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector) { uint64_t shift, mask; if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) { /* * Remap vectors in custom way to make at least first * three doorbells to not generate stray events. * This breaks Linux compatibility (if one existed) * when more then one DB is used (not by if_ntb). */ if (db_vector < XEON_NONLINK_DB_MSIX_BITS - 1) return (1 << db_vector); if (db_vector == XEON_NONLINK_DB_MSIX_BITS - 1) return (0x7ffc); } shift = ntb->db_vec_shift; mask = (1ull << shift) - 1; return (mask << (shift * db_vector)); } static void intel_ntb_interrupt(struct ntb_softc *ntb, uint32_t vec) { uint64_t vec_mask; ntb->last_ts = ticks; vec_mask = intel_ntb_vec_mask(ntb, vec); if (ntb->type == NTB_XEON_GEN3 && vec == XEON_GEN3_LINK_VECTOR_INDEX) vec_mask |= ntb->db_link_mask; if ((vec_mask & ntb->db_link_mask) != 0) { if (intel_ntb_poll_link(ntb)) ntb_link_event(ntb->device); if (ntb->type == NTB_XEON_GEN3) intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_STATUS, intel_ntb_reg_read(8, XEON_GEN3_REG_IMINT_STATUS)); } if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) && (vec_mask & ntb->db_link_mask) == 0) { DB_MASK_LOCK(ntb); /* * Do not report same DB events again if not cleared yet, * unless the mask was just cleared for them and this * interrupt handler call can be the consequence of it. */ vec_mask &= ~ntb->fake_db | ntb->force_db; ntb->force_db &= ~vec_mask; /* Update our internal doorbell register. */ ntb->fake_db |= vec_mask; /* Do not report masked DB events. 
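			 * They stay latched in fake_db and are replayed
			 * via swi_sched() once intel_ntb_db_clear_mask()
			 * unmasks them.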
*/ vec_mask &= ~ntb->db_mask; DB_MASK_UNLOCK(ntb); } if ((vec_mask & ntb->db_valid_mask) != 0) ntb_db_event(ntb->device, vec); } static void ndev_vec_isr(void *arg) { struct ntb_vec *nvec = arg; intel_ntb_interrupt(nvec->ntb, nvec->num); } static void ndev_irq_isr(void *arg) { /* If we couldn't set up MSI-X, we only have the one vector. */ intel_ntb_interrupt(arg, 0); } static int intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors) { uint32_t i; ntb->msix_vec = malloc(num_vectors * sizeof(*ntb->msix_vec), M_NTB, M_ZERO | M_WAITOK); for (i = 0; i < num_vectors; i++) { ntb->msix_vec[i].num = i; ntb->msix_vec[i].ntb = ntb; } return (0); } static void intel_ntb_free_msix_vec(struct ntb_softc *ntb) { if (ntb->msix_vec == NULL) return; free(ntb->msix_vec, M_NTB); ntb->msix_vec = NULL; } static void intel_ntb_get_msix_info(struct ntb_softc *ntb) { struct pci_devinfo *dinfo; struct pcicfg_msix *msix; uint32_t laddr, data, i, offset; dinfo = device_get_ivars(ntb->device); msix = &dinfo->cfg.msix; CTASSERT(XEON_NONLINK_DB_MSIX_BITS == nitems(ntb->msix_data)); for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) { offset = msix->msix_table_offset + i * PCI_MSIX_ENTRY_SIZE; laddr = bus_read_4(msix->msix_table_res, offset + PCI_MSIX_ENTRY_LOWER_ADDR); intel_ntb_printf(2, "local MSIX addr(%u): 0x%x\n", i, laddr); KASSERT((laddr & MSI_INTEL_ADDR_BASE) == MSI_INTEL_ADDR_BASE, ("local MSIX addr 0x%x not in MSI base 0x%x", laddr, MSI_INTEL_ADDR_BASE)); ntb->msix_data[i].nmd_ofs = laddr; data = bus_read_4(msix->msix_table_res, offset + PCI_MSIX_ENTRY_DATA); intel_ntb_printf(2, "local MSIX data(%u): 0x%x\n", i, data); ntb->msix_data[i].nmd_data = data; } } static struct ntb_hw_info * intel_ntb_get_device_info(uint32_t device_id) { struct ntb_hw_info *ep; for (ep = pci_ids; ep < &pci_ids[nitems(pci_ids)]; ep++) { if (ep->device_id == device_id) return (ep); } return (NULL); } static void intel_ntb_teardown_xeon(struct ntb_softc *ntb) { if (ntb->reg != NULL) intel_ntb_link_disable(ntb->device); } static void intel_ntb_detect_max_mw(struct ntb_softc *ntb) { switch (ntb->type) { case NTB_ATOM: ntb->mw_count = ATOM_MW_COUNT; break; case NTB_XEON_GEN1: if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT; else ntb->mw_count = XEON_SNB_MW_COUNT; break; case NTB_XEON_GEN3: if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) ntb->mw_count = XEON_GEN3_SPLIT_MW_COUNT; else ntb->mw_count = XEON_GEN3_MW_COUNT; break; } } static int intel_ntb_detect_xeon(struct ntb_softc *ntb) { uint8_t ppd, conn_type; ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1); ntb->ppd = ppd; if ((ppd & XEON_PPD_DEV_TYPE) != 0) ntb->dev_type = NTB_DEV_DSD; else ntb->dev_type = NTB_DEV_USD; if ((ppd & XEON_PPD_SPLIT_BAR) != 0) ntb->features |= NTB_SPLIT_BAR; if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) && !HAS_FEATURE(ntb, NTB_SPLIT_BAR)) { device_printf(ntb->device, "Can not apply SB01BASE_LOCKUP workaround " "with split BARs disabled!\n"); device_printf(ntb->device, "Expect system hangs under heavy NTB traffic!\n"); ntb->features &= ~NTB_SB01BASE_LOCKUP; } /* * SDOORBELL errata workaround gets in the way of SB01BASE_LOCKUP * errata workaround; only do one at a time. 
*/ if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) ntb->features &= ~NTB_SDOORBELL_LOCKUP; conn_type = ppd & XEON_PPD_CONN_TYPE; switch (conn_type) { case NTB_CONN_B2B: ntb->conn_type = conn_type; break; case NTB_CONN_RP: case NTB_CONN_TRANSPARENT: default: device_printf(ntb->device, "Unsupported connection type: %u\n", (unsigned)conn_type); return (ENXIO); } return (0); } static int intel_ntb_detect_atom(struct ntb_softc *ntb) { uint32_t ppd, conn_type; ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4); ntb->ppd = ppd; if ((ppd & ATOM_PPD_DEV_TYPE) != 0) ntb->dev_type = NTB_DEV_DSD; else ntb->dev_type = NTB_DEV_USD; conn_type = (ppd & ATOM_PPD_CONN_TYPE) >> 8; switch (conn_type) { case NTB_CONN_B2B: ntb->conn_type = conn_type; break; default: device_printf(ntb->device, "Unsupported NTB configuration\n"); return (ENXIO); } return (0); } static int intel_ntb_detect_xeon_gen3(struct ntb_softc *ntb) { uint8_t ppd, conn_type; ppd = pci_read_config(ntb->device, XEON_GEN3_INT_REG_PPD, 1); ntb->ppd = ppd; /* check port definition */ conn_type = XEON_GEN3_REG_PPD_PORT_DEF_F(ppd); switch (conn_type) { case NTB_CONN_B2B: ntb->conn_type = conn_type; break; default: device_printf(ntb->device, "Unsupported connection type: %u\n", conn_type); return (ENXIO); } /* check cross link configuration status */ if (XEON_GEN3_REG_PPD_CONF_STS_F(ppd)) { /* NTB Port is configured as DSD/USP */ ntb->dev_type = NTB_DEV_DSD; } else { /* NTB Port is configured as USD/DSP */ ntb->dev_type = NTB_DEV_USD; } if (XEON_GEN3_REG_PPD_ONE_MSIX_F(ppd)) { /* * This bit, when set, causes only a single MSI-X message to be * generated if MSI-X is enabled. */ ntb->features |= NTB_ONE_MSIX; } if (XEON_GEN3_REG_PPD_BAR45_SPL_F(ppd)) { /* BARs 4 and 5 are presented as two 32b non-prefetchable BARs */ ntb->features |= NTB_SPLIT_BAR; } device_printf(ntb->device, "conn type 0x%02x, dev type 0x%02x, " "features 0x%02x\n", ntb->conn_type, ntb->dev_type, ntb->features); return (0); } static int intel_ntb_xeon_init_dev(struct ntb_softc *ntb) { int rc; ntb->spad_count = XEON_SPAD_COUNT; ntb->db_count = XEON_DB_COUNT; ntb->db_link_mask = XEON_DB_LINK_BIT; ntb->db_vec_count = XEON_DB_MSIX_VECTOR_COUNT; ntb->db_vec_shift = XEON_DB_MSIX_VECTOR_SHIFT; if (ntb->conn_type != NTB_CONN_B2B) { device_printf(ntb->device, "Connection type %d not supported\n", ntb->conn_type); return (ENXIO); } ntb->reg = &xeon_reg; ntb->self_reg = &xeon_pri_reg; ntb->peer_reg = &xeon_b2b_reg; ntb->xlat_reg = &xeon_sec_xlat; if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) { ntb->force_db = ntb->fake_db = 0; ntb->msix_mw_idx = (ntb->mw_count + g_ntb_msix_idx) % ntb->mw_count; intel_ntb_printf(2, "Setting up MSIX mw idx %d means %u\n", g_ntb_msix_idx, ntb->msix_mw_idx); rc = intel_ntb_mw_set_wc_internal(ntb, ntb->msix_mw_idx, VM_MEMATTR_UNCACHEABLE); KASSERT(rc == 0, ("shouldn't fail")); } else if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) { /* * There is a Xeon hardware erratum related to writes to SDOORBELL or * B2BDOORBELL in conjunction with inbound access to NTB MMIO space, * which may hang the system. To work around this, use a memory * window to access the interrupt and scratchpad registers on the * remote system.
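* The window chosen for this is mapped VM_MEMATTR_UNCACHEABLE below,
* so the remapped register accesses are neither combined nor reordered.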
*/ ntb->b2b_mw_idx = (ntb->mw_count + g_ntb_mw_idx) % ntb->mw_count; intel_ntb_printf(2, "Setting up b2b mw idx %d means %u\n", g_ntb_mw_idx, ntb->b2b_mw_idx); rc = intel_ntb_mw_set_wc_internal(ntb, ntb->b2b_mw_idx, VM_MEMATTR_UNCACHEABLE); KASSERT(rc == 0, ("shouldn't fail")); } else if (HAS_FEATURE(ntb, NTB_B2BDOORBELL_BIT14)) /* * HW erratum on bit 14 of the b2bdoorbell register. Writes will not be * mirrored to the remote system. Shrink the number of bits by one, * since bit 14 is the last bit. * * In REGS_THRU_MW errata mode, we don't use the b2bdoorbell register * anyway. Nor for non-B2B connection types. */ ntb->db_count = XEON_DB_COUNT - 1; ntb->db_valid_mask = (1ull << ntb->db_count) - 1; if (ntb->dev_type == NTB_DEV_USD) rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_dsd_addr, &xeon_b2b_usd_addr); else rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_usd_addr, &xeon_b2b_dsd_addr); if (rc != 0) return (rc); /* Enable Bus Master and Memory Space on the secondary side */ intel_ntb_reg_write(2, XEON_SPCICMD_OFFSET, PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); /* * Mask all doorbell interrupts. */ DB_MASK_LOCK(ntb); ntb->db_mask = ntb->db_valid_mask; db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask); DB_MASK_UNLOCK(ntb); rc = intel_ntb_init_isr(ntb); return (rc); } static int intel_ntb_xeon_gen3_init_dev(struct ntb_softc *ntb) { int rc; ntb->spad_count = XEON_GEN3_SPAD_COUNT; ntb->db_count = XEON_GEN3_DB_COUNT; ntb->db_link_mask = XEON_GEN3_DB_LINK_BIT; ntb->db_vec_count = XEON_GEN3_DB_MSIX_VECTOR_COUNT; ntb->db_vec_shift = XEON_GEN3_DB_MSIX_VECTOR_SHIFT; if (ntb->conn_type != NTB_CONN_B2B) { device_printf(ntb->device, "Connection type %d not supported\n", ntb->conn_type); return (ENXIO); } ntb->reg = &xeon_gen3_reg; ntb->self_reg = &xeon_gen3_pri_reg; ntb->peer_reg = &xeon_gen3_b2b_reg; ntb->xlat_reg = &xeon_gen3_sec_xlat; ntb->db_valid_mask = (1ULL << ntb->db_count) - 1; xeon_gen3_setup_b2b_mw(ntb); /* Enable Bus Master and Memory Space on the External Side */ intel_ntb_reg_write(2, XEON_GEN3_EXT_REG_PCI_CMD, PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); /* Setup Interrupt */ rc = intel_ntb_xeon_gen3_init_isr(ntb); return (rc); } static int intel_ntb_atom_init_dev(struct ntb_softc *ntb) { int error; KASSERT(ntb->conn_type == NTB_CONN_B2B, ("Unsupported NTB configuration (%d)\n", ntb->conn_type)); ntb->spad_count = ATOM_SPAD_COUNT; ntb->db_count = ATOM_DB_COUNT; ntb->db_vec_count = ATOM_DB_MSIX_VECTOR_COUNT; ntb->db_vec_shift = ATOM_DB_MSIX_VECTOR_SHIFT; ntb->db_valid_mask = (1ull << ntb->db_count) - 1; ntb->reg = &atom_reg; ntb->self_reg = &atom_pri_reg; ntb->peer_reg = &atom_b2b_reg; ntb->xlat_reg = &atom_sec_xlat; /* * FIXME - MSI-X bug on early Atom HW, remove once internal issue is * resolved. Mask transaction layer internal parity errors. */ pci_write_config(ntb->device, 0xFC, 0x4, 4); configure_atom_secondary_side_bars(ntb); /* Enable Bus Master and Memory Space on the secondary side */ intel_ntb_reg_write(2, ATOM_SPCICMD_OFFSET, PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); error = intel_ntb_init_isr(ntb); if (error != 0) return (error); /* Initiate PCI-E link training */ intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb); return (0); } /* XXX: Linux driver doesn't seem to do any of this for Atom.
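Note that the USD and DSD branches below currently program identical translation values, so the split is kept for documentation only.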
*/ static void configure_atom_secondary_side_bars(struct ntb_softc *ntb) { if (ntb->dev_type == NTB_DEV_USD) { intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, XEON_B2B_BAR2_ADDR64); intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, XEON_B2B_BAR4_ADDR64); intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64); intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64); } else { intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, XEON_B2B_BAR2_ADDR64); intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, XEON_B2B_BAR4_ADDR64); intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64); intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64); } } /* * When working around Xeon SDOORBELL errata by remapping remote registers in a * MW, limit the B2B MW to half a MW. By sharing a MW, half the shared MW * remains for use by a higher layer. * * Will only be used if working around SDOORBELL errata and the BIOS-configured * MW size is sufficiently large. */ static unsigned int ntb_b2b_mw_share; SYSCTL_UINT(_hw_ntb, OID_AUTO, b2b_mw_share, CTLFLAG_RDTUN, &ntb_b2b_mw_share, 0, "If enabled (non-zero), prefer to share half of the B2B peer register " "MW with higher level consumers. Both sides of the NTB MUST set the same " "value here."); static void xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx, enum ntb_bar regbar) { struct ntb_pci_bar_info *bar; uint8_t bar_sz; if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3) return; bar = &ntb->bar_info[idx]; bar_sz = pci_read_config(ntb->device, bar->psz_off, 1); if (idx == regbar) { if (ntb->b2b_off != 0) bar_sz--; else bar_sz = 0; } pci_write_config(ntb->device, bar->ssz_off, bar_sz, 1); bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1); (void)bar_sz; } static void xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr, enum ntb_bar idx, enum ntb_bar regbar) { uint64_t reg_val; uint32_t base_reg, lmt_reg; bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg); if (idx == regbar) { if (ntb->b2b_off) bar_addr += ntb->b2b_off; else bar_addr = 0; } if (!bar_is_64bit(ntb, idx)) { intel_ntb_reg_write(4, base_reg, bar_addr); reg_val = intel_ntb_reg_read(4, base_reg); (void)reg_val; intel_ntb_reg_write(4, lmt_reg, bar_addr); reg_val = intel_ntb_reg_read(4, lmt_reg); (void)reg_val; } else { intel_ntb_reg_write(8, base_reg, bar_addr); reg_val = intel_ntb_reg_read(8, base_reg); (void)reg_val; intel_ntb_reg_write(8, lmt_reg, bar_addr); reg_val = intel_ntb_reg_read(8, lmt_reg); (void)reg_val; } } static void xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx) { struct ntb_pci_bar_info *bar; bar = &ntb->bar_info[idx]; if (HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) { intel_ntb_reg_write(4, bar->pbarxlat_off, base_addr); base_addr = intel_ntb_reg_read(4, bar->pbarxlat_off); } else { intel_ntb_reg_write(8, bar->pbarxlat_off, base_addr); base_addr = intel_ntb_reg_read(8, bar->pbarxlat_off); } (void)base_addr; } static int xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr) { struct ntb_pci_bar_info *b2b_bar; vm_size_t bar_size; uint64_t bar_addr; enum ntb_bar b2b_bar_num, i; if (ntb->b2b_mw_idx == B2B_MW_DISABLED) { b2b_bar = NULL; b2b_bar_num = NTB_CONFIG_BAR; ntb->b2b_off = 0; } else { b2b_bar_num = intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx); KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS, ("invalid b2b mw bar")); b2b_bar = &ntb->bar_info[b2b_bar_num]; bar_size = b2b_bar->size; if (ntb_b2b_mw_share != 0 && 
(bar_size >> 1) >= XEON_B2B_MIN_SIZE) ntb->b2b_off = bar_size >> 1; else if (bar_size >= XEON_B2B_MIN_SIZE) { ntb->b2b_off = 0; } else { device_printf(ntb->device, "B2B bar size is too small!\n"); return (EIO); } } /* * Reset the secondary bar sizes to match the primary bar sizes. * (Except, disable or halve the size of the B2B secondary bar.) */ for (i = NTB_B2B_BAR_1; i < NTB_MAX_BARS; i++) xeon_reset_sbar_size(ntb, i, b2b_bar_num); bar_addr = 0; if (b2b_bar_num == NTB_CONFIG_BAR) bar_addr = addr->bar0_addr; else if (b2b_bar_num == NTB_B2B_BAR_1) bar_addr = addr->bar2_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(ntb, NTB_SPLIT_BAR)) bar_addr = addr->bar4_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2) bar_addr = addr->bar4_addr32; else if (b2b_bar_num == NTB_B2B_BAR_3) bar_addr = addr->bar5_addr32; else KASSERT(false, ("invalid bar")); intel_ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr); /* * Other SBARs are normally hit by the PBAR xlat, except for the b2b * register BAR. The B2B BAR is either disabled above or configured * half-size. It starts at PBAR xlat + offset. * * Also set up incoming BAR limits == base (zero length window). */ xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1, b2b_bar_num); if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) { xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32, NTB_B2B_BAR_2, b2b_bar_num); xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32, NTB_B2B_BAR_3, b2b_bar_num); } else xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr64, NTB_B2B_BAR_2, b2b_bar_num); /* Zero incoming translation addrs */ intel_ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0); intel_ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0); if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) { uint32_t xlat_reg, lmt_reg; enum ntb_bar bar_num; /* * We point the chosen MSIX MW BAR xlat to remote LAPIC for * workaround */ bar_num = intel_ntb_mw_to_bar(ntb, ntb->msix_mw_idx); bar_get_xlat_params(ntb, bar_num, NULL, &xlat_reg, &lmt_reg); if (bar_is_64bit(ntb, bar_num)) { intel_ntb_reg_write(8, xlat_reg, MSI_INTEL_ADDR_BASE); ntb->msix_xlat = intel_ntb_reg_read(8, xlat_reg); intel_ntb_reg_write(8, lmt_reg, 0); } else { intel_ntb_reg_write(4, xlat_reg, MSI_INTEL_ADDR_BASE); ntb->msix_xlat = intel_ntb_reg_read(4, xlat_reg); intel_ntb_reg_write(4, lmt_reg, 0); } ntb->peer_lapic_bar = &ntb->bar_info[bar_num]; } (void)intel_ntb_reg_read(8, XEON_SBAR2XLAT_OFFSET); (void)intel_ntb_reg_read(8, XEON_SBAR4XLAT_OFFSET); /* Zero outgoing translation limits (whole bar size windows) */ intel_ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0); intel_ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0); /* Set outgoing translation offsets */ xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1); if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) { xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2); xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3); } else xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr64, NTB_B2B_BAR_2); /* Set the translation offset for B2B registers */ bar_addr = 0; if (b2b_bar_num == NTB_CONFIG_BAR) bar_addr = peer_addr->bar0_addr; else if (b2b_bar_num == NTB_B2B_BAR_1) bar_addr = peer_addr->bar2_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(ntb, NTB_SPLIT_BAR)) bar_addr = peer_addr->bar4_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2) bar_addr = peer_addr->bar4_addr32; else if (b2b_bar_num == NTB_B2B_BAR_3) bar_addr = peer_addr->bar5_addr32; else KASSERT(false, ("invalid bar")); /* * B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits * at a time. 
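* For example, with bar_addr = 0x123456000 the two writes below are
* XEON_B2B_XLAT_OFFSETL = 0x23456000 and XEON_B2B_XLAT_OFFSETU = 0x1.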
*/ intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff); intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32); return (0); } static int xeon_gen3_setup_b2b_mw(struct ntb_softc *ntb) { uint64_t reg; uint32_t embarsz, imbarsz; /* IMBAR1SZ should be equal to EMBAR1SZ */ embarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_EMBAR1SZ, 1); imbarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_IMBAR1SZ, 1); if (embarsz != imbarsz) { device_printf(ntb->device, "IMBAR1SZ (%u) should be equal to EMBAR1SZ (%u)\n", imbarsz, embarsz); return (EIO); } /* IMBAR2SZ should be equal to EMBAR2SZ */ embarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_EMBAR2SZ, 1); imbarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_IMBAR2SZ, 1); if (embarsz != imbarsz) { device_printf(ntb->device, "IMBAR2SZ (%u) should be equal to EMBAR2SZ (%u)\n", imbarsz, embarsz); return (EIO); } /* Client will provide the incoming IMBAR1/2XBASE, zero it for now */ intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR1XBASE, 0); intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR2XBASE, 0); /* * If the value in EMBAR1LIMIT is set equal to the value in EMBAR1, * the memory window for EMBAR1 is disabled. * Note: this is needed to avoid malicious access. */ reg = pci_read_config(ntb->device, XEON_GEN3_EXT_REG_BAR1BASE, 8); intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR1XLIMIT, reg); reg = pci_read_config(ntb->device, XEON_GEN3_EXT_REG_BAR2BASE, 8); intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR2XLIMIT, reg); return (0); } static inline bool _xeon_link_is_up(struct ntb_softc *ntb) { if (ntb->conn_type == NTB_CONN_TRANSPARENT) return (true); return ((ntb->lnk_sta & NTB_LINK_STATUS_ACTIVE) != 0); } static inline bool link_is_up(struct ntb_softc *ntb) { if (ntb->type == NTB_XEON_GEN1 || ntb->type == NTB_XEON_GEN3) return (_xeon_link_is_up(ntb) && (ntb->peer_msix_good || !HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))); KASSERT(ntb->type == NTB_ATOM, ("ntb type")); return ((ntb->ntb_ctl & ATOM_CNTL_LINK_DOWN) == 0); } static inline bool atom_link_is_err(struct ntb_softc *ntb) { uint32_t status; KASSERT(ntb->type == NTB_ATOM, ("ntb type")); status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET); if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0) return (true); status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET); return ((status & ATOM_IBIST_ERR_OFLOW) != 0); } /* Atom does not have a link status interrupt; poll on that platform. */ static void atom_link_hb(void *arg) { struct ntb_softc *ntb = arg; sbintime_t timo, poll_ts; timo = NTB_HB_TIMEOUT * hz; poll_ts = ntb->last_ts + timo; /* * Delay polling the link status if an interrupt was received, unless * the cached link status says the link is down. */ if ((sbintime_t)ticks - poll_ts < 0 && link_is_up(ntb)) { timo = poll_ts - ticks; goto out; } if (intel_ntb_poll_link(ntb)) ntb_link_event(ntb->device); if (!link_is_up(ntb) && atom_link_is_err(ntb)) { /* Link is down with error, proceed with recovery */ callout_reset(&ntb->lr_timer, 0, recover_atom_link, ntb); return; } out: callout_reset(&ntb->heartbeat_timer, timo, atom_link_hb, ntb); } static void atom_perform_link_restart(struct ntb_softc *ntb) { uint32_t status; /* Driver resets the NTB ModPhy lanes - magic!
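The PCSREG4/PCSREG6 byte sequence written below is an opaque, hardware-prescribed reset recipe; treat the values as required constants.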
*/ intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0); intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40); intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60); intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60); /* Driver waits 100ms to allow the NTB ModPhy to settle */ pause("ModPhy", hz / 10); /* Clear AER Errors, write to clear */ status = intel_ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET); status &= PCIM_AER_COR_REPLAY_ROLLOVER; intel_ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status); /* Clear unexpected electrical idle event in LTSSM, write to clear */ status = intel_ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET); status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI; intel_ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status); /* Clear DeSkew Buffer error, write to clear */ status = intel_ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET); status |= ATOM_DESKEWSTS_DBERR; intel_ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status); status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET); status &= ATOM_IBIST_ERR_OFLOW; intel_ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status); /* Releases the NTB state machine to allow the link to retrain */ status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET); status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT; intel_ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status); } static int intel_ntb_port_number(device_t dev) { struct ntb_softc *ntb = device_get_softc(dev); return (ntb->dev_type == NTB_DEV_USD ? 0 : 1); } static int intel_ntb_peer_port_count(device_t dev) { return (1); } static int intel_ntb_peer_port_number(device_t dev, int pidx) { struct ntb_softc *ntb = device_get_softc(dev); if (pidx != 0) return (-EINVAL); return (ntb->dev_type == NTB_DEV_USD ? 1 : 0); } static int intel_ntb_peer_port_idx(device_t dev, int port) { int peer_port; peer_port = intel_ntb_peer_port_number(dev, 0); if (peer_port == -EINVAL || port != peer_port) return (-EINVAL); return (0); } static int intel_ntb_link_enable(device_t dev, enum ntb_speed speed __unused, enum ntb_width width __unused) { struct ntb_softc *ntb = device_get_softc(dev); uint32_t cntl; intel_ntb_printf(2, "%s\n", __func__); if (ntb->type == NTB_ATOM) { pci_write_config(ntb->device, NTB_PPD_OFFSET, ntb->ppd | ATOM_PPD_INIT_LINK, 4); return (0); } if (ntb->conn_type == NTB_CONN_TRANSPARENT) { ntb_link_event(dev); return (0); } cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl); cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK); cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP; cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP; if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP; intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl); return (0); } static int intel_ntb_link_disable(device_t dev) { struct ntb_softc *ntb = device_get_softc(dev); uint32_t cntl; intel_ntb_printf(2, "%s\n", __func__); if (ntb->conn_type == NTB_CONN_TRANSPARENT) { ntb_link_event(dev); return (0); } cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl); cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP); cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP); if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP); cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK; intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl); return (0); } static bool intel_ntb_link_enabled(device_t dev) { struct ntb_softc *ntb = device_get_softc(dev); uint32_t cntl; if (ntb->type == NTB_ATOM) { cntl = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4); return ((cntl & ATOM_PPD_INIT_LINK) != 0); } if 
(ntb->conn_type == NTB_CONN_TRANSPARENT) return (true); cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl); return ((cntl & NTB_CNTL_LINK_DISABLE) == 0); } static void recover_atom_link(void *arg) { struct ntb_softc *ntb = arg; unsigned speed, width, oldspeed, oldwidth; uint32_t status32; atom_perform_link_restart(ntb); /* * There is a potential race between the 2 NTB devices recovering at * the same time. If the times are the same, the link will not recover * and the driver will be stuck in this loop forever. Add a random * interval to the recovery time to prevent this race. */ status32 = arc4random() % ATOM_LINK_RECOVERY_TIME; pause("Link", (ATOM_LINK_RECOVERY_TIME + status32) * hz / 1000); if (atom_link_is_err(ntb)) goto retry; status32 = intel_ntb_reg_read(4, ntb->reg->ntb_ctl); if ((status32 & ATOM_CNTL_LINK_DOWN) != 0) goto out; status32 = intel_ntb_reg_read(4, ntb->reg->lnk_sta); width = NTB_LNK_STA_WIDTH(status32); speed = status32 & NTB_LINK_SPEED_MASK; oldwidth = NTB_LNK_STA_WIDTH(ntb->lnk_sta); oldspeed = ntb->lnk_sta & NTB_LINK_SPEED_MASK; if (oldwidth != width || oldspeed != speed) goto retry; out: callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, atom_link_hb, ntb); return; retry: callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_atom_link, ntb); } /* * Polls the HW link status register(s); returns true if something has changed. */ static bool intel_ntb_poll_link(struct ntb_softc *ntb) { uint32_t ntb_cntl; uint16_t reg_val; if (ntb->type == NTB_ATOM) { ntb_cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl); if (ntb_cntl == ntb->ntb_ctl) return (false); ntb->ntb_ctl = ntb_cntl; ntb->lnk_sta = intel_ntb_reg_read(4, ntb->reg->lnk_sta); } else { if (ntb->type == NTB_XEON_GEN1) db_iowrite_raw(ntb, ntb->self_reg->db_bell, ntb->db_link_mask); reg_val = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2); if (reg_val == ntb->lnk_sta) return (false); ntb->lnk_sta = reg_val; if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) { if (_xeon_link_is_up(ntb)) { if (!ntb->peer_msix_good) { callout_reset(&ntb->peer_msix_work, 0, intel_ntb_exchange_msix, ntb); return (false); } } else { ntb->peer_msix_good = false; ntb->peer_msix_done = false; } } } return (true); } static inline enum ntb_speed intel_ntb_link_sta_speed(struct ntb_softc *ntb) { if (!link_is_up(ntb)) return (NTB_SPEED_NONE); return (ntb->lnk_sta & NTB_LINK_SPEED_MASK); } static inline enum ntb_width intel_ntb_link_sta_width(struct ntb_softc *ntb) { if (!link_is_up(ntb)) return (NTB_WIDTH_NONE); return (NTB_LNK_STA_WIDTH(ntb->lnk_sta)); } SYSCTL_NODE(_hw_ntb, OID_AUTO, debug_info, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "Driver state, statistics, and HW registers"); #define NTB_REGSZ_MASK (3ul << 30) #define NTB_REG_64 (1ul << 30) #define NTB_REG_32 (2ul << 30) #define NTB_REG_16 (3ul << 30) #define NTB_REG_8 (0ul << 30) #define NTB_DB_READ (1ul << 29) #define NTB_PCI_REG (1ul << 28) #define NTB_REGFLAGS_MASK (NTB_REGSZ_MASK | NTB_DB_READ | NTB_PCI_REG) static void intel_ntb_sysctl_init(struct ntb_softc *ntb) { struct sysctl_oid_list *globals, *tree_par, *regpar, *statpar, *errpar; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree, *tmptree; ctx = device_get_sysctl_ctx(ntb->device); globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device)); SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "link_status", CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_NEEDGIANT, ntb, 0, sysctl_handle_link_status_human, "A", "Link status (human readable)"); SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "active", CTLFLAG_RD | CTLTYPE_UINT | CTLFLAG_NEEDGIANT, ntb, 0, 
sysctl_handle_link_status, "IU", "Link status (1=active, 0=inactive)"); SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "admin_up", CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_NEEDGIANT, ntb, 0, sysctl_handle_link_admin, "IU", "Set/get interface status (1=UP, 0=DOWN)"); tree = SYSCTL_ADD_NODE(ctx, globals, OID_AUTO, "debug_info", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Driver state, statistics, and HW registers"); tree_par = SYSCTL_CHILDREN(tree); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "conn_type", CTLFLAG_RD, &ntb->conn_type, 0, "0 - Transparent; 1 - B2B; 2 - Root Port"); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "dev_type", CTLFLAG_RD, &ntb->dev_type, 0, "0 - USD; 1 - DSD"); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ppd", CTLFLAG_RD, &ntb->ppd, 0, "Raw PPD register (cached)"); if (ntb->b2b_mw_idx != B2B_MW_DISABLED) { SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "b2b_idx", CTLFLAG_RD, &ntb->b2b_mw_idx, 0, "Index of the MW used for B2B remote register access"); SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "b2b_off", CTLFLAG_RD, &ntb->b2b_off, "If non-zero, offset of B2B register region in shared MW"); } SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "features", CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_NEEDGIANT, ntb, 0, sysctl_handle_features, "A", "Features/errata of this NTB device"); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ntb_ctl", CTLFLAG_RD, __DEVOLATILE(uint32_t *, &ntb->ntb_ctl), 0, "NTB CTL register (cached)"); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "lnk_sta", CTLFLAG_RD, __DEVOLATILE(uint32_t *, &ntb->lnk_sta), 0, "LNK STA register (cached)"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "mw_count", CTLFLAG_RD, &ntb->mw_count, 0, "MW count"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "spad_count", CTLFLAG_RD, &ntb->spad_count, 0, "Scratchpad count"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_count", CTLFLAG_RD, &ntb->db_count, 0, "Doorbell count"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_count", CTLFLAG_RD, &ntb->db_vec_count, 0, "Doorbell vector count"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_shift", CTLFLAG_RD, &ntb->db_vec_shift, 0, "Doorbell vector shift"); SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_valid_mask", CTLFLAG_RD, &ntb->db_valid_mask, "Doorbell valid mask"); SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_link_mask", CTLFLAG_RD, &ntb->db_link_mask, "Doorbell link mask"); SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_mask", CTLFLAG_RD, &ntb->db_mask, "Doorbell mask (cached)"); tmptree = SYSCTL_ADD_NODE(ctx, tree_par, OID_AUTO, "registers", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Raw HW registers (big-endian)"); regpar = SYSCTL_CHILDREN(tmptree); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ntbcntl", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | ntb->reg->ntb_ctl, sysctl_handle_register, "IU", "NTB Control register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcap", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | 0x19c, sysctl_handle_register, "IU", "NTB Link Capabilities"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcon", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | 0x1a0, sysctl_handle_register, "IU", "NTB Link Control register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_mask", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_mask, sysctl_handle_register, "QU", "Doorbell mask register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_bell", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_bell, sysctl_handle_register, "QU", "Doorbell 
register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat23", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_64 | ntb->xlat_reg->bar2_xlat, sysctl_handle_register, "QU", "Incoming XLAT23 register"); if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat4", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | ntb->xlat_reg->bar4_xlat, sysctl_handle_register, "IU", "Incoming XLAT4 register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat5", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | ntb->xlat_reg->bar5_xlat, sysctl_handle_register, "IU", "Incoming XLAT5 register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat45", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_64 | ntb->xlat_reg->bar4_xlat, sysctl_handle_register, "QU", "Incoming XLAT45 register"); } SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt23", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_64 | ntb->xlat_reg->bar2_limit, sysctl_handle_register, "QU", "Incoming LMT23 register"); if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt4", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | ntb->xlat_reg->bar4_limit, sysctl_handle_register, "IU", "Incoming LMT4 register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt5", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | ntb->xlat_reg->bar5_limit, sysctl_handle_register, "IU", "Incoming LMT5 register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt45", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_64 | ntb->xlat_reg->bar4_limit, sysctl_handle_register, "QU", "Incoming LMT45 register"); } if (ntb->type == NTB_ATOM) return; tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Xeon HW statistics"); statpar = SYSCTL_CHILDREN(tmptree); SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "upstream_mem_miss", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_16 | XEON_USMEMMISS_OFFSET, sysctl_handle_register, "SU", "Upstream Memory Miss"); tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_hw_err", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Xeon HW errors"); errpar = SYSCTL_CHILDREN(tmptree); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ppd", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_8 | NTB_PCI_REG | NTB_PPD_OFFSET, sysctl_handle_register, "CU", "PPD"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar23_sz", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_PBAR23SZ_OFFSET, sysctl_handle_register, "CU", "PBAR23 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar4_sz", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_PBAR4SZ_OFFSET, sysctl_handle_register, "CU", "PBAR4 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar5_sz", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_PBAR5SZ_OFFSET, sysctl_handle_register, "CU", "PBAR5 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_sz", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_SBAR23SZ_OFFSET, sysctl_handle_register, "CU", "SBAR23 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_sz", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_SBAR4SZ_OFFSET, sysctl_handle_register, "CU", "SBAR4 SZ (log2)"); SYSCTL_ADD_PROC(ctx, 
regpar, OID_AUTO, "sbar5_sz", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_SBAR5SZ_OFFSET, sysctl_handle_register, "CU", "SBAR5 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "devsts", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_16 | NTB_PCI_REG | XEON_DEVSTS_OFFSET, sysctl_handle_register, "SU", "DEVSTS"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnksts", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_16 | NTB_PCI_REG | XEON_LINK_STATUS_OFFSET, sysctl_handle_register, "SU", "LNKSTS"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "slnksts", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_16 | NTB_PCI_REG | XEON_SLINK_STATUS_OFFSET, sysctl_handle_register, "SU", "SLNKSTS"); SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "uncerrsts", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET, sysctl_handle_register, "IU", "UNCERRSTS"); SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "corerrsts", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | NTB_PCI_REG | XEON_CORERRSTS_OFFSET, sysctl_handle_register, "IU", "CORERRSTS"); if (ntb->conn_type != NTB_CONN_B2B) return; SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat01l", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | XEON_B2B_XLAT_OFFSETL, sysctl_handle_register, "IU", "Outgoing XLAT0L register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat01u", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | XEON_B2B_XLAT_OFFSETU, sysctl_handle_register, "IU", "Outgoing XLAT0U register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat23", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off, sysctl_handle_register, "QU", "Outgoing XLAT23 register"); if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat4", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off, sysctl_handle_register, "IU", "Outgoing XLAT4 register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat5", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off, sysctl_handle_register, "IU", "Outgoing XLAT5 register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat45", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off, sysctl_handle_register, "QU", "Outgoing XLAT45 register"); } SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt23", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_64 | XEON_PBAR2LMT_OFFSET, sysctl_handle_register, "QU", "Outgoing LMT23 register"); if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt4", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | XEON_PBAR4LMT_OFFSET, sysctl_handle_register, "IU", "Outgoing LMT4 register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt5", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | XEON_PBAR5LMT_OFFSET, sysctl_handle_register, "IU", "Outgoing LMT5 register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt45", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_64 | XEON_PBAR4LMT_OFFSET, sysctl_handle_register, "QU", "Outgoing LMT45 register"); } SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar01_base", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, 
NTB_REG_64 | ntb->xlat_reg->bar0_base, sysctl_handle_register, "QU", "Secondary BAR01 base register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_base", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_64 | ntb->xlat_reg->bar2_base, sysctl_handle_register, "QU", "Secondary BAR23 base register"); if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_base", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | ntb->xlat_reg->bar4_base, sysctl_handle_register, "IU", "Secondary BAR4 base register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_base", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_32 | ntb->xlat_reg->bar5_base, sysctl_handle_register, "IU", "Secondary BAR5 base register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar45_base", CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_NEEDGIANT, ntb, NTB_REG_64 | ntb->xlat_reg->bar4_base, sysctl_handle_register, "QU", "Secondary BAR45 base register"); } } static int sysctl_handle_features(SYSCTL_HANDLER_ARGS) { struct ntb_softc *ntb = arg1; struct sbuf sb; int error; sbuf_new_for_sysctl(&sb, NULL, 256, req); sbuf_printf(&sb, "%b", ntb->features, NTB_FEATURES_STR); error = sbuf_finish(&sb); sbuf_delete(&sb); if (error || !req->newptr) return (error); return (EINVAL); } static int sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS) { struct ntb_softc *ntb = arg1; unsigned old, new; int error; old = intel_ntb_link_enabled(ntb->device); error = SYSCTL_OUT(req, &old, sizeof(old)); if (error != 0 || req->newptr == NULL) return (error); error = SYSCTL_IN(req, &new, sizeof(new)); if (error != 0) return (error); intel_ntb_printf(0, "Admin set interface state to '%sabled'\n", (new != 0)? "en" : "dis"); if (new != 0) error = intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); else error = intel_ntb_link_disable(ntb->device); return (error); } static int sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS) { struct ntb_softc *ntb = arg1; struct sbuf sb; enum ntb_speed speed; enum ntb_width width; int error; sbuf_new_for_sysctl(&sb, NULL, 32, req); if (intel_ntb_link_is_up(ntb->device, &speed, &width)) sbuf_printf(&sb, "up / PCIe Gen %u / Width x%u", (unsigned)speed, (unsigned)width); else sbuf_printf(&sb, "down"); error = sbuf_finish(&sb); sbuf_delete(&sb); if (error || !req->newptr) return (error); return (EINVAL); } static int sysctl_handle_link_status(SYSCTL_HANDLER_ARGS) { struct ntb_softc *ntb = arg1; unsigned res; int error; res = intel_ntb_link_is_up(ntb->device, NULL, NULL); error = SYSCTL_OUT(req, &res, sizeof(res)); if (error || !req->newptr) return (error); return (EINVAL); } static int sysctl_handle_register(SYSCTL_HANDLER_ARGS) { struct ntb_softc *ntb; const void *outp; uintptr_t sz; uint64_t umv; char be[sizeof(umv)]; size_t outsz; uint32_t reg; bool db, pci; int error; ntb = arg1; reg = arg2 & ~NTB_REGFLAGS_MASK; sz = arg2 & NTB_REGSZ_MASK; db = (arg2 & NTB_DB_READ) != 0; pci = (arg2 & NTB_PCI_REG) != 0; KASSERT(!(db && pci), ("bogus")); if (db) { KASSERT(sz == NTB_REG_64, ("bogus")); umv = db_ioread(ntb, reg); outsz = sizeof(uint64_t); } else { switch (sz) { case NTB_REG_64: if (pci) umv = pci_read_config(ntb->device, reg, 8); else umv = intel_ntb_reg_read(8, reg); outsz = sizeof(uint64_t); break; case NTB_REG_32: if (pci) umv = pci_read_config(ntb->device, reg, 4); else umv = intel_ntb_reg_read(4, reg); outsz = sizeof(uint32_t); break; case NTB_REG_16: if (pci) umv = pci_read_config(ntb->device, reg, 2); else umv = intel_ntb_reg_read(2, reg); outsz = 
sizeof(uint16_t); break; case NTB_REG_8: if (pci) umv = pci_read_config(ntb->device, reg, 1); else umv = intel_ntb_reg_read(1, reg); outsz = sizeof(uint8_t); break; default: panic("bogus"); break; } } /* Encode bigendian so that sysctl -x is legible. */ be64enc(be, umv); outp = ((char *)be) + sizeof(umv) - outsz; error = SYSCTL_OUT(req, outp, outsz); if (error || !req->newptr) return (error); return (EINVAL); } static unsigned intel_ntb_user_mw_to_idx(struct ntb_softc *ntb, unsigned uidx) { if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 && uidx >= ntb->b2b_mw_idx) || (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx)) uidx++; if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 && uidx >= ntb->b2b_mw_idx) && (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx)) uidx++; return (uidx); } #ifndef EARLY_AP_STARTUP static int msix_ready; static void intel_ntb_msix_ready(void *arg __unused) { msix_ready = 1; } SYSINIT(intel_ntb_msix_ready, SI_SUB_SMP, SI_ORDER_ANY, intel_ntb_msix_ready, NULL); #endif static void intel_ntb_exchange_msix(void *ctx) { struct ntb_softc *ntb; uint32_t val; unsigned i; ntb = ctx; if (ntb->peer_msix_good) goto msix_good; if (ntb->peer_msix_done) goto msix_done; #ifndef EARLY_AP_STARTUP /* Block MSIX negotiation until SMP started and IRQ reshuffled. */ if (!msix_ready) goto reschedule; #endif intel_ntb_get_msix_info(ntb); for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) { intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DATA0 + i, ntb->msix_data[i].nmd_data); intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_OFS0 + i, ntb->msix_data[i].nmd_ofs - ntb->msix_xlat); } intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_GUARD, NTB_MSIX_VER_GUARD); intel_ntb_spad_read(ntb->device, NTB_MSIX_GUARD, &val); if (val != NTB_MSIX_VER_GUARD) goto reschedule; for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) { intel_ntb_spad_read(ntb->device, NTB_MSIX_DATA0 + i, &val); intel_ntb_printf(2, "remote MSIX data(%u): 0x%x\n", i, val); ntb->peer_msix_data[i].nmd_data = val; intel_ntb_spad_read(ntb->device, NTB_MSIX_OFS0 + i, &val); intel_ntb_printf(2, "remote MSIX addr(%u): 0x%x\n", i, val); ntb->peer_msix_data[i].nmd_ofs = val; } ntb->peer_msix_done = true; msix_done: intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DONE, NTB_MSIX_RECEIVED); intel_ntb_spad_read(ntb->device, NTB_MSIX_DONE, &val); if (val != NTB_MSIX_RECEIVED) goto reschedule; intel_ntb_spad_clear(ntb->device); ntb->peer_msix_good = true; /* Give peer time to see our NTB_MSIX_RECEIVED. */ goto reschedule; msix_good: intel_ntb_poll_link(ntb); ntb_link_event(ntb->device); return; reschedule: ntb->lnk_sta = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2); if (_xeon_link_is_up(ntb)) { callout_reset(&ntb->peer_msix_work, hz * (ntb->peer_msix_good ? 
2 : 1) / 10, intel_ntb_exchange_msix, ntb); } else intel_ntb_spad_clear(ntb->device); } /* * Public API to the rest of the OS */ static uint8_t intel_ntb_spad_count(device_t dev) { struct ntb_softc *ntb = device_get_softc(dev); return (ntb->spad_count); } static uint8_t intel_ntb_mw_count(device_t dev) { struct ntb_softc *ntb = device_get_softc(dev); uint8_t res; res = ntb->mw_count; if (ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0) res--; if (ntb->msix_mw_idx != B2B_MW_DISABLED) res--; return (res); } static int intel_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val) { struct ntb_softc *ntb = device_get_softc(dev); if (idx >= ntb->spad_count) return (EINVAL); intel_ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val); return (0); } /* * Zeros the local scratchpad. */ static void intel_ntb_spad_clear(device_t dev) { struct ntb_softc *ntb = device_get_softc(dev); unsigned i; for (i = 0; i < ntb->spad_count; i++) intel_ntb_spad_write(dev, i, 0); } static int intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val) { struct ntb_softc *ntb = device_get_softc(dev); if (idx >= ntb->spad_count) return (EINVAL); *val = intel_ntb_reg_read(4, ntb->self_reg->spad + idx * 4); return (0); } static int intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val) { struct ntb_softc *ntb = device_get_softc(dev); if (idx >= ntb->spad_count) return (EINVAL); if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) intel_ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val); else intel_ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val); return (0); } static int intel_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val) { struct ntb_softc *ntb = device_get_softc(dev); if (idx >= ntb->spad_count) return (EINVAL); if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) *val = intel_ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4); else *val = intel_ntb_reg_read(4, ntb->peer_reg->spad + idx * 4); return (0); } static int intel_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base, caddr_t *vbase, size_t *size, size_t *align, size_t *align_size, bus_addr_t *plimit) { struct ntb_softc *ntb = device_get_softc(dev); struct ntb_pci_bar_info *bar; bus_addr_t limit; size_t bar_b2b_off; enum ntb_bar bar_num; if (mw_idx >= intel_ntb_mw_count(dev)) return (EINVAL); mw_idx = intel_ntb_user_mw_to_idx(ntb, mw_idx); bar_num = intel_ntb_mw_to_bar(ntb, mw_idx); bar = &ntb->bar_info[bar_num]; bar_b2b_off = 0; if (mw_idx == ntb->b2b_mw_idx) { KASSERT(ntb->b2b_off != 0, ("user shouldn't get non-shared b2b mw")); bar_b2b_off = ntb->b2b_off; } if (bar_is_64bit(ntb, bar_num)) limit = BUS_SPACE_MAXADDR; else limit = BUS_SPACE_MAXADDR_32BIT; if (base != NULL) *base = bar->pbase + bar_b2b_off; if (vbase != NULL) *vbase = bar->vbase + bar_b2b_off; if (size != NULL) *size = bar->size - bar_b2b_off; if (align != NULL) *align = bar->size; if (align_size != NULL) *align_size = 1; if (plimit != NULL) *plimit = limit; return (0); } static int intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size) { struct ntb_softc *ntb = device_get_softc(dev); struct ntb_pci_bar_info *bar; uint64_t base, limit, reg_val; size_t bar_size, mw_size; uint32_t base_reg, xlat_reg, limit_reg; enum ntb_bar bar_num; if (idx >= intel_ntb_mw_count(dev)) return (EINVAL); idx = intel_ntb_user_mw_to_idx(ntb, idx); bar_num = intel_ntb_mw_to_bar(ntb, idx); bar = &ntb->bar_info[bar_num]; bar_size = bar->size; if (idx == ntb->b2b_mw_idx) mw_size = bar_size - ntb->b2b_off; else mw_size = bar_size; /* Hardware requires 
that addr is aligned to bar size */ if ((addr & (bar_size - 1)) != 0) return (EINVAL); if (size > mw_size) return (EINVAL); bar_get_xlat_params(ntb, bar_num, &base_reg, &xlat_reg, &limit_reg); limit = 0; if (bar_is_64bit(ntb, bar_num)) { base = intel_ntb_reg_read(8, base_reg) & BAR_HIGH_MASK; if (limit_reg != 0 && size != mw_size) limit = base + size; else limit = base + mw_size; /* Set and verify translation address */ intel_ntb_reg_write(8, xlat_reg, addr); reg_val = intel_ntb_reg_read(8, xlat_reg) & BAR_HIGH_MASK; if (reg_val != addr) { intel_ntb_reg_write(8, xlat_reg, 0); return (EIO); } /* Set and verify the limit */ intel_ntb_reg_write(8, limit_reg, limit); reg_val = intel_ntb_reg_read(8, limit_reg) & BAR_HIGH_MASK; if (reg_val != limit) { intel_ntb_reg_write(8, limit_reg, base); intel_ntb_reg_write(8, xlat_reg, 0); return (EIO); } if (ntb->type == NTB_XEON_GEN3) { limit = base + size; /* set EMBAR1/2XLIMIT */ if (!idx) intel_ntb_reg_write(8, XEON_GEN3_REG_EMBAR1XLIMIT, limit); else intel_ntb_reg_write(8, XEON_GEN3_REG_EMBAR2XLIMIT, limit); } } else { /* Configure 32-bit (split) BAR MW */ if (ntb->type == NTB_XEON_GEN3) return (EIO); if ((addr & UINT32_MAX) != addr) return (ERANGE); if (((addr + size) & UINT32_MAX) != (addr + size)) return (ERANGE); base = intel_ntb_reg_read(4, base_reg) & BAR_HIGH_MASK; if (limit_reg != 0 && size != mw_size) limit = base + size; /* Set and verify translation address */ intel_ntb_reg_write(4, xlat_reg, addr); reg_val = intel_ntb_reg_read(4, xlat_reg) & BAR_HIGH_MASK; if (reg_val != addr) { intel_ntb_reg_write(4, xlat_reg, 0); return (EIO); } /* Set and verify the limit */ intel_ntb_reg_write(4, limit_reg, limit); reg_val = intel_ntb_reg_read(4, limit_reg) & BAR_HIGH_MASK; if (reg_val != limit) { intel_ntb_reg_write(4, limit_reg, base); intel_ntb_reg_write(4, xlat_reg, 0); return (EIO); } } return (0); } static int intel_ntb_mw_clear_trans(device_t dev, unsigned mw_idx) { return (intel_ntb_mw_set_trans(dev, mw_idx, 0, 0)); } static int intel_ntb_mw_get_wc(device_t dev, unsigned idx, vm_memattr_t *mode) { struct ntb_softc *ntb = device_get_softc(dev); struct ntb_pci_bar_info *bar; if (idx >= intel_ntb_mw_count(dev)) return (EINVAL); idx = intel_ntb_user_mw_to_idx(ntb, idx); bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)]; *mode = bar->map_mode; return (0); } static int intel_ntb_mw_set_wc(device_t dev, unsigned idx, vm_memattr_t mode) { struct ntb_softc *ntb = device_get_softc(dev); if (idx >= intel_ntb_mw_count(dev)) return (EINVAL); idx = intel_ntb_user_mw_to_idx(ntb, idx); return (intel_ntb_mw_set_wc_internal(ntb, idx, mode)); } static int intel_ntb_mw_set_wc_internal(struct ntb_softc *ntb, unsigned idx, vm_memattr_t mode) { struct ntb_pci_bar_info *bar; int rc; bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)]; if (bar->map_mode == mode) return (0); rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mode); if (rc == 0) bar->map_mode = mode; return (rc); } static void intel_ntb_peer_db_set(device_t dev, uint64_t bits) { struct ntb_softc *ntb = device_get_softc(dev); uint64_t db; if ((bits & ~ntb->db_valid_mask) != 0) { device_printf(ntb->device, "Invalid doorbell bits %#jx\n", (uintmax_t)bits); return; } if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) { struct ntb_pci_bar_info *lapic; unsigned i; lapic = ntb->peer_lapic_bar; for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) { if ((bits & intel_ntb_db_vector_mask(dev, i)) != 0) bus_space_write_4(lapic->pci_bus_tag, lapic->pci_bus_handle, ntb->peer_msix_data[i].nmd_ofs, 
ntb->peer_msix_data[i].nmd_data); } return; } if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) { intel_ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bits); return; } if (ntb->type == NTB_XEON_GEN3) { while (bits != 0) { db = ffsll(bits); intel_ntb_reg_write(1, ntb->peer_reg->db_bell + (db - 1) * 4, 0x1); bits = bits & (bits - 1); } } else { db_iowrite(ntb, ntb->peer_reg->db_bell, bits); } } static int intel_ntb_peer_db_addr(device_t dev, bus_addr_t *db_addr, vm_size_t *db_size) { struct ntb_softc *ntb = device_get_softc(dev); struct ntb_pci_bar_info *bar; uint64_t regoff; KASSERT((db_addr != NULL && db_size != NULL), ("must be non-NULL")); if (!HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) { bar = &ntb->bar_info[NTB_CONFIG_BAR]; regoff = ntb->peer_reg->db_bell; } else { KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED, ("invalid b2b idx")); bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)]; regoff = XEON_PDOORBELL_OFFSET; } KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO, ("uh oh")); /* HACK: Specific to current x86 bus implementation. */ *db_addr = ((uint64_t)bar->pci_bus_handle + regoff); *db_size = ntb->reg->db_size; return (0); } static uint64_t intel_ntb_db_valid_mask(device_t dev) { struct ntb_softc *ntb = device_get_softc(dev); return (ntb->db_valid_mask); } static int intel_ntb_db_vector_count(device_t dev) { struct ntb_softc *ntb = device_get_softc(dev); return (ntb->db_vec_count); } static uint64_t intel_ntb_db_vector_mask(device_t dev, uint32_t vector) { struct ntb_softc *ntb = device_get_softc(dev); if (vector > ntb->db_vec_count) return (0); return (ntb->db_valid_mask & intel_ntb_vec_mask(ntb, vector)); } static bool intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width) { struct ntb_softc *ntb = device_get_softc(dev); if (speed != NULL) *speed = intel_ntb_link_sta_speed(ntb); if (width != NULL) *width = intel_ntb_link_sta_width(ntb); return (link_is_up(ntb)); } static void save_bar_parameters(struct ntb_pci_bar_info *bar) { bar->pci_bus_tag = rman_get_bustag(bar->pci_resource); bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource); bar->pbase = rman_get_start(bar->pci_resource); bar->size = rman_get_size(bar->pci_resource); bar->vbase = rman_get_virtual(bar->pci_resource); } static device_method_t ntb_intel_methods[] = { /* Device interface */ DEVMETHOD(device_probe, intel_ntb_probe), DEVMETHOD(device_attach, intel_ntb_attach), DEVMETHOD(device_detach, intel_ntb_detach), /* Bus interface */ - DEVMETHOD(bus_child_location_str, ntb_child_location_str), + DEVMETHOD(bus_child_location, ntb_child_location), DEVMETHOD(bus_print_child, ntb_print_child), DEVMETHOD(bus_get_dma_tag, ntb_get_dma_tag), /* NTB interface */ DEVMETHOD(ntb_port_number, intel_ntb_port_number), DEVMETHOD(ntb_peer_port_count, intel_ntb_peer_port_count), DEVMETHOD(ntb_peer_port_number, intel_ntb_peer_port_number), DEVMETHOD(ntb_peer_port_idx, intel_ntb_peer_port_idx), DEVMETHOD(ntb_link_is_up, intel_ntb_link_is_up), DEVMETHOD(ntb_link_enable, intel_ntb_link_enable), DEVMETHOD(ntb_link_disable, intel_ntb_link_disable), DEVMETHOD(ntb_link_enabled, intel_ntb_link_enabled), DEVMETHOD(ntb_mw_count, intel_ntb_mw_count), DEVMETHOD(ntb_mw_get_range, intel_ntb_mw_get_range), DEVMETHOD(ntb_mw_set_trans, intel_ntb_mw_set_trans), DEVMETHOD(ntb_mw_clear_trans, intel_ntb_mw_clear_trans), DEVMETHOD(ntb_mw_get_wc, intel_ntb_mw_get_wc), DEVMETHOD(ntb_mw_set_wc, intel_ntb_mw_set_wc), DEVMETHOD(ntb_spad_count, intel_ntb_spad_count), DEVMETHOD(ntb_spad_clear, intel_ntb_spad_clear), DEVMETHOD(ntb_spad_write, 
intel_ntb_spad_write), DEVMETHOD(ntb_spad_read, intel_ntb_spad_read), DEVMETHOD(ntb_peer_spad_write, intel_ntb_peer_spad_write), DEVMETHOD(ntb_peer_spad_read, intel_ntb_peer_spad_read), DEVMETHOD(ntb_db_valid_mask, intel_ntb_db_valid_mask), DEVMETHOD(ntb_db_vector_count, intel_ntb_db_vector_count), DEVMETHOD(ntb_db_vector_mask, intel_ntb_db_vector_mask), DEVMETHOD(ntb_db_clear, intel_ntb_db_clear), DEVMETHOD(ntb_db_clear_mask, intel_ntb_db_clear_mask), DEVMETHOD(ntb_db_read, intel_ntb_db_read), DEVMETHOD(ntb_db_set_mask, intel_ntb_db_set_mask), DEVMETHOD(ntb_peer_db_addr, intel_ntb_peer_db_addr), DEVMETHOD(ntb_peer_db_set, intel_ntb_peer_db_set), DEVMETHOD_END }; static DEFINE_CLASS_0(ntb_hw, ntb_intel_driver, ntb_intel_methods, sizeof(struct ntb_softc)); DRIVER_MODULE(ntb_hw_intel, pci, ntb_intel_driver, ntb_hw_devclass, NULL, NULL); MODULE_DEPEND(ntb_hw_intel, ntb, 1, 1, 1); MODULE_VERSION(ntb_hw_intel, 1); MODULE_PNP_INFO("W32:vendor/device;D:#", pci, ntb_hw_intel, pci_ids, nitems(pci_ids)); diff --git a/sys/dev/ntb/ntb_hw/ntb_hw_plx.c b/sys/dev/ntb/ntb_hw/ntb_hw_plx.c index f3d8af4971a4..4231b1209291 100644 --- a/sys/dev/ntb/ntb_hw/ntb_hw_plx.c +++ b/sys/dev/ntb/ntb_hw/ntb_hw_plx.c @@ -1,1096 +1,1096 @@ /*- * Copyright (c) 2017-2019 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * The Non-Transparent Bridge (NTB) is a device that allows you to connect * two or more systems using PCI-e links, providing remote memory access. * * This module contains a driver for NTBs in PLX/Avago/Broadcom PCIe bridges. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "../ntb.h" #define PLX_MAX_BARS 4 /* There are at most 4 data BARs. */ #define PLX_NUM_SPAD 8 /* There are 8 scratchpads. */ #define PLX_NUM_SPAD_PATT 4 /* Use test pattern as 4 more. */ #define PLX_NUM_DB 16 /* There are 16 doorbells. */ #define PLX_MAX_SPLIT 128 /* Allow at most 128 splits.
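The split field of the softc below stores log2 of the split count, so 2^7 = 128 is the ceiling.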
*/ struct ntb_plx_mw_info { int mw_bar; int mw_64bit; int mw_rid; struct resource *mw_res; vm_paddr_t mw_pbase; caddr_t mw_vbase; vm_size_t mw_size; struct { vm_memattr_t mw_map_mode; bus_addr_t mw_xlat_addr; bus_size_t mw_xlat_size; } splits[PLX_MAX_SPLIT]; }; struct ntb_plx_softc { /* ntb.c context. Do not move! Must go first! */ void *ntb_store; device_t dev; struct resource *conf_res; int conf_rid; u_int ntx; /* NTx number within chip. */ u_int link; /* Link v/s Virtual side. */ u_int port; /* Port number within chip. */ u_int alut; /* A-LUT is enabled for NTx */ u_int split; /* split BAR2 into 2^x parts */ int int_rid; struct resource *int_res; void *int_tag; struct ntb_plx_mw_info mw_info[PLX_MAX_BARS]; int mw_count; /* Number of memory windows. */ int spad_count1; /* Number of standard spads. */ int spad_count2; /* Number of extra spads. */ uint32_t spad_off1; /* Offset of our spads. */ uint32_t spad_off2; /* Offset of our extra spads. */ uint32_t spad_offp1; /* Offset of peer spads. */ uint32_t spad_offp2; /* Offset of peer extra spads. */ /* Parameters of window shared with peer config access in B2B mode. */ int b2b_mw; /* Shared window number. */ uint64_t b2b_off; /* Offset in shared window. */ }; #define PLX_NT0_BASE 0x3E000 #define PLX_NT1_BASE 0x3C000 #define PLX_NTX_BASE(sc) ((sc)->ntx ? PLX_NT1_BASE : PLX_NT0_BASE) #define PLX_NTX_LINK_OFFSET 0x01000 /* Bases of NTx our/peer interface registers */ #define PLX_NTX_OUR_BASE(sc) \ (PLX_NTX_BASE(sc) + ((sc)->link ? PLX_NTX_LINK_OFFSET : 0)) #define PLX_NTX_PEER_BASE(sc) \ (PLX_NTX_BASE(sc) + ((sc)->link ? 0 : PLX_NTX_LINK_OFFSET)) /* Read/write NTx our interface registers */ #define NTX_READ(sc, reg) \ bus_read_4((sc)->conf_res, PLX_NTX_OUR_BASE(sc) + (reg)) #define NTX_WRITE(sc, reg, val) \ bus_write_4((sc)->conf_res, PLX_NTX_OUR_BASE(sc) + (reg), (val)) /* Read/write NTx peer interface registers */ #define PNTX_READ(sc, reg) \ bus_read_4((sc)->conf_res, PLX_NTX_PEER_BASE(sc) + (reg)) #define PNTX_WRITE(sc, reg, val) \ bus_write_4((sc)->conf_res, PLX_NTX_PEER_BASE(sc) + (reg), (val)) /* Read/write B2B NTx registers */ #define BNTX_READ(sc, reg) \ bus_read_4((sc)->mw_info[(sc)->b2b_mw].mw_res, \ PLX_NTX_BASE(sc) + (reg)) #define BNTX_WRITE(sc, reg, val) \ bus_write_4((sc)->mw_info[(sc)->b2b_mw].mw_res, \ PLX_NTX_BASE(sc) + (reg), (val)) #define PLX_PORT_BASE(p) ((p) << 12) #define PLX_STATION_PORT_BASE(sc) PLX_PORT_BASE((sc)->port & ~7) #define PLX_PORT_CONTROL(sc) (PLX_STATION_PORT_BASE(sc) + 0x208) static int ntb_plx_init(device_t dev); static int ntb_plx_detach(device_t dev); static int ntb_plx_mw_set_trans_internal(device_t dev, unsigned mw_idx); static int ntb_plx_probe(device_t dev) { switch (pci_get_devid(dev)) { case 0x87a010b5: device_set_desc(dev, "PLX Non-Transparent Bridge NT0 Link"); return (BUS_PROBE_DEFAULT); case 0x87a110b5: device_set_desc(dev, "PLX Non-Transparent Bridge NT1 Link"); return (BUS_PROBE_DEFAULT); case 0x87b010b5: device_set_desc(dev, "PLX Non-Transparent Bridge NT0 Virtual"); return (BUS_PROBE_DEFAULT); case 0x87b110b5: device_set_desc(dev, "PLX Non-Transparent Bridge NT1 Virtual"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int ntb_plx_init(device_t dev) { struct ntb_plx_softc *sc = device_get_softc(dev); struct ntb_plx_mw_info *mw; uint64_t val64; int i; uint32_t val; if (sc->b2b_mw >= 0) { /* Set peer BAR0/1 size and address for B2B NTx access. 
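The peer's BAR0 is pointed at the same link address that the shared window translates to, so local accesses through it (the BNTX_READ()/BNTX_WRITE() macros above) land in the peer's 256KB configuration space.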
*/ mw = &sc->mw_info[sc->b2b_mw]; if (mw->mw_64bit) { PNTX_WRITE(sc, 0xe4, 0x3); /* 64-bit */ val64 = 0x2000000000000000 * mw->mw_bar | 0x4; PNTX_WRITE(sc, PCIR_BAR(0), val64); PNTX_WRITE(sc, PCIR_BAR(0) + 4, val64 >> 32); } else { PNTX_WRITE(sc, 0xe4, 0x2); /* 32-bit */ val = 0x20000000 * mw->mw_bar; PNTX_WRITE(sc, PCIR_BAR(0), val); } /* Set Virtual to Link address translation for B2B. */ for (i = 0; i < sc->mw_count; i++) { mw = &sc->mw_info[i]; if (mw->mw_64bit) { val64 = 0x2000000000000000 * mw->mw_bar; NTX_WRITE(sc, 0xc3c + (mw->mw_bar - 2) * 4, val64); NTX_WRITE(sc, 0xc3c + (mw->mw_bar - 2) * 4 + 4, val64 >> 32); } else { val = 0x20000000 * mw->mw_bar; NTX_WRITE(sc, 0xc3c + (mw->mw_bar - 2) * 4, val); } } /* Make sure Virtual to Link A-LUT is disabled. */ if (sc->alut) PNTX_WRITE(sc, 0xc94, 0); /* Enable all Link Interface LUT entries for peer. */ for (i = 0; i < 32; i += 2) { PNTX_WRITE(sc, 0xdb4 + i * 2, 0x00010001 | ((i + 1) << 19) | (i << 3)); } } /* * Enable Virtual Interface LUT entry 0 for 0:0.*, * entry 1 for our Requester ID reported by the chip, * entries 2-5 for 0/64/128/192:4.* of I/OAT DMA engines. * XXX: It's a hack; we can't know all DMA engines, but this covers all * I/OAT of Xeon E5/E7 I have seen, at least from Sandy Bridge till Skylake. */ val = (NTX_READ(sc, 0xc90) << 16) | 0x00010001; NTX_WRITE(sc, sc->link ? 0xdb4 : 0xd94, val); NTX_WRITE(sc, sc->link ? 0xdb8 : 0xd98, 0x40210021); NTX_WRITE(sc, sc->link ? 0xdbc : 0xd9c, 0xc0218021); /* Set Link to Virtual address translation. */ for (i = 0; i < sc->mw_count; i++) ntb_plx_mw_set_trans_internal(dev, i); pci_enable_busmaster(dev); if (sc->b2b_mw >= 0) PNTX_WRITE(sc, PCIR_COMMAND, PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); return (0); } static void ntb_plx_isr(void *arg) { device_t dev = arg; struct ntb_plx_softc *sc = device_get_softc(dev); uint32_t val; ntb_db_event((device_t)arg, 0); if (sc->link) /* Link Interface has no Link Error registers. */ return; val = NTX_READ(sc, 0xfe0); if (val == 0) return; NTX_WRITE(sc, 0xfe0, val); if (val & 1) device_printf(dev, "Correctable Error\n"); if (val & 2) device_printf(dev, "Uncorrectable Error\n"); if (val & 4) { /* DL_Down resets link side registers, have to reinit. */ ntb_plx_init(dev); ntb_link_event(dev); } if (val & 8) device_printf(dev, "Uncorrectable Error Message Drop\n"); } static int ntb_plx_setup_intr(device_t dev) { struct ntb_plx_softc *sc = device_get_softc(dev); int error; /* * XXX: This hardware supports MSI, but I found it unusable. * It generates a new MSI only when the doorbell register goes from * zero, but does not generate it when another bit is set or on * partial clear. It makes operation very racy and unreliable. * The data book mentions some mask juggling magic to work around * that, but I failed to make it work. */ sc->int_rid = 0; sc->int_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->int_rid, RF_SHAREABLE|RF_ACTIVE); if (sc->int_res == NULL) { device_printf(dev, "bus_alloc_resource failed\n"); return (ENOMEM); } error = bus_setup_intr(dev, sc->int_res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, ntb_plx_isr, dev, &sc->int_tag); if (error != 0) { device_printf(dev, "bus_setup_intr failed: %d\n", error); return (error); } if (!sc->link) { /* Link Interface has no Link Error registers. */ NTX_WRITE(sc, 0xfe0, 0xf); /* Clear link interrupts. */ NTX_WRITE(sc, 0xfe4, 0x0); /* Unmask link interrupts.
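* (The 0xf written to the clear/mask registers covers the four link events decoded in ntb_plx_isr(): correctable error, uncorrectable error, DL_Down and message drop.)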
*/ } return (0); } static void ntb_plx_teardown_intr(device_t dev) { struct ntb_plx_softc *sc = device_get_softc(dev); if (!sc->link) /* Link Interface has no Link Error registers. */ NTX_WRITE(sc, 0xfe4, 0xf); /* Mask link interrupts. */ if (sc->int_res) { bus_teardown_intr(dev, sc->int_res, sc->int_tag); bus_release_resource(dev, SYS_RES_IRQ, sc->int_rid, sc->int_res); } } static int ntb_plx_attach(device_t dev) { struct ntb_plx_softc *sc = device_get_softc(dev); struct ntb_plx_mw_info *mw; int error = 0, i, j; uint32_t val; char buf[32]; /* Identify what we are (what side of what NTx). */ sc->dev = dev; val = pci_read_config(dev, 0xc8c, 4); sc->ntx = (val & 1) != 0; sc->link = (val & 0x80000000) != 0; /* Get access to the whole 256KB of chip configuration space via BAR0/1. */ sc->conf_rid = PCIR_BAR(0); sc->conf_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->conf_rid, RF_ACTIVE); if (sc->conf_res == NULL) { device_printf(dev, "Can't allocate configuration BAR.\n"); return (ENXIO); } /* * The device occupies the whole bus. In translated TLPs the slot field * keeps the LUT index (original bus/slot); the function is passed through. */ bus_dma_iommu_set_buswide(dev); /* Identify the chip port we are connected to. */ val = bus_read_4(sc->conf_res, 0x360); sc->port = (val >> ((sc->ntx == 0) ? 8 : 16)) & 0x1f; /* Detect A-LUT enable and size. */ val >>= 30; sc->alut = (val == 0x3) ? 1 : ((val & (1 << sc->ntx)) ? 2 : 0); if (sc->alut) device_printf(dev, "%u A-LUT entries\n", 128 * sc->alut); /* Find configured memory windows at BAR2-5. */ sc->mw_count = 0; for (i = 2; i <= 5; i++) { mw = &sc->mw_info[sc->mw_count]; mw->mw_bar = i; mw->mw_rid = PCIR_BAR(mw->mw_bar); mw->mw_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &mw->mw_rid, RF_ACTIVE); if (mw->mw_res == NULL) continue; mw->mw_pbase = rman_get_start(mw->mw_res); mw->mw_size = rman_get_size(mw->mw_res); mw->mw_vbase = rman_get_virtual(mw->mw_res); for (j = 0; j < PLX_MAX_SPLIT; j++) mw->splits[j].mw_map_mode = VM_MEMATTR_UNCACHEABLE; sc->mw_count++; /* Skip over adjacent BAR for 64-bit BARs. */ val = pci_read_config(dev, PCIR_BAR(mw->mw_bar), 4); if ((val & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) { mw->mw_64bit = 1; i++; } } /* Try to identify B2B mode. */ i = 1; snprintf(buf, sizeof(buf), "hint.%s.%d.b2b", device_get_name(dev), device_get_unit(dev)); TUNABLE_INT_FETCH(buf, &i); if (sc->link) { device_printf(dev, "NTB-to-Root Port mode (Link Interface)\n"); sc->b2b_mw = -1; } else if (i == 0) { device_printf(dev, "NTB-to-Root Port mode (Virtual Interface)\n"); sc->b2b_mw = -1; } else { device_printf(dev, "NTB-to-NTB (back-to-back) mode\n"); /* We need at least one memory window for B2B peer access. */ if (sc->mw_count == 0) { device_printf(dev, "No memory window BARs enabled.\n"); error = ENXIO; goto out; } sc->b2b_mw = sc->mw_count - 1; /* Use half of the window for B2B, but no less than 1MB.
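* For example (sizes illustrative): a 4MB window leaves a 2MB half for data at b2b_off = 2MB, while a 1MB window is consumed entirely by B2B (b2b_off = 0) and is later hidden from users by ntb_plx_mw_count().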
*/ mw = &sc->mw_info[sc->b2b_mw]; if (mw->mw_size >= 2 * 1024 * 1024) sc->b2b_off = mw->mw_size / 2; else sc->b2b_off = 0; } snprintf(buf, sizeof(buf), "hint.%s.%d.split", device_get_name(dev), device_get_unit(dev)); TUNABLE_INT_FETCH(buf, &sc->split); if (sc->split > 7) { device_printf(dev, "Split value is too high (%u)\n", sc->split); sc->split = 0; } else if (sc->split > 0 && sc->alut == 0) { device_printf(dev, "Can't split with disabled A-LUT\n"); sc->split = 0; } else if (sc->split > 0 && (sc->mw_count == 0 || sc->mw_info[0].mw_bar != 2)) { device_printf(dev, "Can't split disabled BAR2\n"); sc->split = 0; } else if (sc->split > 0 && (sc->b2b_mw == 0 && sc->b2b_off == 0)) { device_printf(dev, "Can't split BAR2 consumed by B2B\n"); sc->split = 0; } else if (sc->split > 0) { device_printf(dev, "Splitting BAR2 into %d memory windows\n", 1 << sc->split); } /* * Use Physical Layer User Test Pattern registers as additional scratchpads. * Make sure they are present and enabled by writing to them. * XXX: It's a hack, but the standard 8 registers are not enough. */ sc->spad_offp1 = sc->spad_off1 = PLX_NTX_OUR_BASE(sc) + 0xc6c; sc->spad_offp2 = sc->spad_off2 = PLX_PORT_BASE(sc->ntx * 8) + 0x20c; if (sc->b2b_mw >= 0) { /* In NTB-to-NTB mode each side has its own scratchpads. */ sc->spad_count1 = PLX_NUM_SPAD; bus_write_4(sc->conf_res, sc->spad_off2, 0x12345678); if (bus_read_4(sc->conf_res, sc->spad_off2) == 0x12345678) sc->spad_count2 = PLX_NUM_SPAD_PATT; } else { /* Otherwise we share scratchpads with the peer. */ if (sc->link) { sc->spad_off1 += PLX_NUM_SPAD / 2 * 4; sc->spad_off2 += PLX_NUM_SPAD_PATT / 2 * 4; } else { sc->spad_offp1 += PLX_NUM_SPAD / 2 * 4; sc->spad_offp2 += PLX_NUM_SPAD_PATT / 2 * 4; } sc->spad_count1 = PLX_NUM_SPAD / 2; bus_write_4(sc->conf_res, sc->spad_off2, 0x12345678); if (bus_read_4(sc->conf_res, sc->spad_off2) == 0x12345678) sc->spad_count2 = PLX_NUM_SPAD_PATT / 2; } /* Apply static part of NTB configuration. */ ntb_plx_init(dev); /* Allocate and set up interrupts. */ error = ntb_plx_setup_intr(dev); if (error) goto out; /* Attach children to this controller */ error = ntb_register_device(dev); out: if (error != 0) ntb_plx_detach(dev); return (error); } static int ntb_plx_detach(device_t dev) { struct ntb_plx_softc *sc = device_get_softc(dev); struct ntb_plx_mw_info *mw; int i; /* Detach & delete all children */ ntb_unregister_device(dev); /* Disable and free interrupts. */ ntb_plx_teardown_intr(dev); /* Free memory resources. */ for (i = 0; i < sc->mw_count; i++) { mw = &sc->mw_info[i]; bus_release_resource(dev, SYS_RES_MEMORY, mw->mw_rid, mw->mw_res); } bus_release_resource(dev, SYS_RES_MEMORY, sc->conf_rid, sc->conf_res); return (0); } static int ntb_plx_port_number(device_t dev) { struct ntb_plx_softc *sc = device_get_softc(dev); return (sc->link ? 1 : 0); } static int ntb_plx_peer_port_count(device_t dev) { return (1); } static int ntb_plx_peer_port_number(device_t dev, int pidx) { struct ntb_plx_softc *sc = device_get_softc(dev); if (pidx != 0) return (-EINVAL); return (sc->link ?
0 : 1); } static int ntb_plx_peer_port_idx(device_t dev, int port) { int peer_port; peer_port = ntb_plx_peer_port_number(dev, 0); if (peer_port == -EINVAL || port != peer_port) return (-EINVAL); return (0); } static bool ntb_plx_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width) { uint16_t link; link = pcie_read_config(dev, PCIER_LINK_STA, 2); if (speed != NULL) *speed = (link & PCIEM_LINK_STA_SPEED); if (width != NULL) *width = (link & PCIEM_LINK_STA_WIDTH) >> 4; return ((link & PCIEM_LINK_STA_WIDTH) != 0); } static int ntb_plx_link_enable(device_t dev, enum ntb_speed speed __unused, enum ntb_width width __unused) { struct ntb_plx_softc *sc = device_get_softc(dev); uint32_t reg, val; /* The fact that we see the Link Interface means the link is enabled. */ if (sc->link) { ntb_link_event(dev); return (0); } reg = PLX_PORT_CONTROL(sc); val = bus_read_4(sc->conf_res, reg); if ((val & (1 << (sc->port & 7))) == 0) { /* If already enabled, generate a fake link event and exit. */ ntb_link_event(dev); return (0); } val &= ~(1 << (sc->port & 7)); bus_write_4(sc->conf_res, reg, val); return (0); } static int ntb_plx_link_disable(device_t dev) { struct ntb_plx_softc *sc = device_get_softc(dev); uint32_t reg, val; /* Link disable for Link Interface would be suicidal. */ if (sc->link) return (0); reg = PLX_PORT_CONTROL(sc); val = bus_read_4(sc->conf_res, reg); val |= (1 << (sc->port & 7)); bus_write_4(sc->conf_res, reg, val); return (0); } static bool ntb_plx_link_enabled(device_t dev) { struct ntb_plx_softc *sc = device_get_softc(dev); uint32_t reg, val; /* The fact that we see the Link Interface means the link is enabled. */ if (sc->link) return (TRUE); reg = PLX_PORT_CONTROL(sc); val = bus_read_4(sc->conf_res, reg); return ((val & (1 << (sc->port & 7))) == 0); } static uint8_t ntb_plx_mw_count(device_t dev) { struct ntb_plx_softc *sc = device_get_softc(dev); uint8_t res; res = sc->mw_count; res += (1 << sc->split) - 1; if (sc->b2b_mw >= 0 && sc->b2b_off == 0) res--; /* B2B consumed whole window. */ return (res); } static unsigned ntb_plx_user_mw_to_idx(struct ntb_plx_softc *sc, unsigned uidx, unsigned *sp) { unsigned t; t = 1 << sc->split; if (uidx < t) { *sp = uidx; return (0); } *sp = 0; return (uidx - (t - 1)); } static int ntb_plx_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base, caddr_t *vbase, size_t *size, size_t *align, size_t *align_size, bus_addr_t *plimit) { struct ntb_plx_softc *sc = device_get_softc(dev); struct ntb_plx_mw_info *mw; size_t off, ss; unsigned sp, split; mw_idx = ntb_plx_user_mw_to_idx(sc, mw_idx, &sp); if (mw_idx >= sc->mw_count) return (EINVAL); off = 0; if (mw_idx == sc->b2b_mw) { KASSERT(sc->b2b_off != 0, ("user shouldn't get non-shared b2b mw")); off = sc->b2b_off; } mw = &sc->mw_info[mw_idx]; split = (mw->mw_bar == 2) ? sc->split : 0; ss = (mw->mw_size - off) >> split; /* Local to remote memory window parameters. */ if (base != NULL) *base = mw->mw_pbase + off + ss * sp; if (vbase != NULL) *vbase = mw->mw_vbase + off + ss * sp; if (size != NULL) *size = ss; /* * Remote to local memory window translation address alignment. * The translation address has to be aligned to the BAR size, but A-LUT * entries can re-map addresses aligned to 1/128 or 1/256 of it. * XXX: In B2B mode we can change the BAR size (and so alignment) live, * but there is no way to report it here, so report a safe value.
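* For example (numbers illustrative): a 2MB BAR2 in front of a 256-entry A-LUT (sc->alut == 2) re-maps in 2MB / 256 == 8KB granules, so 8KB is reported below; without A-LUT the full 2MB BAR size is the only safe alignment.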
*/ if (align != NULL) { if (sc->alut && mw->mw_bar == 2) *align = (mw->mw_size - off) / 128 / sc->alut; else *align = mw->mw_size - off; } /* * Remote to local memory window size alignment. * The chip has no limit registers, but A-LUT, when available, allows * access control with granularity of 1/128 or 1/256 of the BAR size. * XXX: In the B2B case we can change the BAR size live, but there is no way * to report it, so report half of the BAR size, which should be safe. * In the non-B2B case there is no control at all, so report the BAR size. */ if (align_size != NULL) { if (sc->alut && mw->mw_bar == 2) *align_size = (mw->mw_size - off) / 128 / sc->alut; else if (sc->b2b_mw >= 0) *align_size = (mw->mw_size - off) / 2; else *align_size = mw->mw_size - off; } /* Remote to local memory window translation address upper limit. */ if (plimit != NULL) *plimit = mw->mw_64bit ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT; return (0); } static int ntb_plx_mw_set_trans_internal(device_t dev, unsigned mw_idx) { struct ntb_plx_softc *sc = device_get_softc(dev); struct ntb_plx_mw_info *mw; uint64_t addr, eaddr, off, size, bsize, esize, val64; uint32_t val; unsigned i, sp, split; mw = &sc->mw_info[mw_idx]; off = (mw_idx == sc->b2b_mw) ? sc->b2b_off : 0; split = (mw->mw_bar == 2) ? sc->split : 0; /* Get BAR size. In case of split or B2RP we can't change it. */ if (split || sc->b2b_mw < 0) { bsize = mw->mw_size - off; } else { bsize = mw->splits[0].mw_xlat_size; if (!powerof2(bsize)) bsize = 1LL << flsll(bsize); if (bsize > 0 && bsize < 1024 * 1024) bsize = 1024 * 1024; } /* * While for B2B we can set any BAR size on the link side, for a shared * window we can't go above the preconfigured size due to BAR address * alignment requirements. */ if ((off & (bsize - 1)) != 0) return (EINVAL); /* In B2B mode set Link Interface BAR size/address. */ if (sc->b2b_mw >= 0 && mw->mw_64bit) { val64 = 0; if (bsize > 0) val64 = (~(bsize - 1) & ~0xfffff); val64 |= 0xc; PNTX_WRITE(sc, 0xe8 + (mw->mw_bar - 2) * 4, val64); PNTX_WRITE(sc, 0xe8 + (mw->mw_bar - 2) * 4 + 4, val64 >> 32); val64 = 0x2000000000000000 * mw->mw_bar + off; PNTX_WRITE(sc, PCIR_BAR(mw->mw_bar), val64); PNTX_WRITE(sc, PCIR_BAR(mw->mw_bar) + 4, val64 >> 32); } else if (sc->b2b_mw >= 0) { val = 0; if (bsize > 0) val = (~(bsize - 1) & ~0xfffff); PNTX_WRITE(sc, 0xe8 + (mw->mw_bar - 2) * 4, val); val64 = 0x20000000 * mw->mw_bar + off; PNTX_WRITE(sc, PCIR_BAR(mw->mw_bar), val64); } /* Set BARs address translation */ addr = split ? UINT64_MAX : mw->splits[0].mw_xlat_addr; if (mw->mw_64bit) { PNTX_WRITE(sc, 0xc3c + (mw->mw_bar - 2) * 4, addr); PNTX_WRITE(sc, 0xc3c + (mw->mw_bar - 2) * 4 + 4, addr >> 32); } else { PNTX_WRITE(sc, 0xc3c + (mw->mw_bar - 2) * 4, addr); } /* Configure and enable A-LUT if we need it. */ size = split ? 0 : mw->splits[0].mw_xlat_size; if (sc->alut && mw->mw_bar == 2 && (sc->split > 0 || ((addr & (bsize - 1)) != 0 || size != bsize))) { esize = bsize / (128 * sc->alut); for (i = sp = 0; i < 128 * sc->alut; i++) { if (i % (128 * sc->alut >> sc->split) == 0) { eaddr = addr = mw->splits[sp].mw_xlat_addr; size = mw->splits[sp++].mw_xlat_size; } val = sc->link ? 0 : 1; if (sc->alut == 1) val += 2 * sc->ntx; val *= 0x1000 * sc->alut; val += 0x38000 + i * 4 + (i >= 128 ? 0x0e00 : 0); bus_write_4(sc->conf_res, val, eaddr); bus_write_4(sc->conf_res, val + 0x400, eaddr >> 32); bus_write_4(sc->conf_res, val + 0x800, (eaddr < addr + size) ?
0x3 : 0); eaddr += esize; } NTX_WRITE(sc, 0xc94, 0x10000000); } else if (sc->alut && mw->mw_bar == 2) NTX_WRITE(sc, 0xc94, 0); return (0); } static int ntb_plx_mw_set_trans(device_t dev, unsigned mw_idx, bus_addr_t addr, size_t size) { struct ntb_plx_softc *sc = device_get_softc(dev); struct ntb_plx_mw_info *mw; unsigned sp; mw_idx = ntb_plx_user_mw_to_idx(sc, mw_idx, &sp); if (mw_idx >= sc->mw_count) return (EINVAL); mw = &sc->mw_info[mw_idx]; if (!mw->mw_64bit && ((addr & UINT32_MAX) != addr || ((addr + size) & UINT32_MAX) != (addr + size))) return (ERANGE); mw->splits[sp].mw_xlat_addr = addr; mw->splits[sp].mw_xlat_size = size; return (ntb_plx_mw_set_trans_internal(dev, mw_idx)); } static int ntb_plx_mw_clear_trans(device_t dev, unsigned mw_idx) { return (ntb_plx_mw_set_trans(dev, mw_idx, 0, 0)); } static int ntb_plx_mw_get_wc(device_t dev, unsigned mw_idx, vm_memattr_t *mode) { struct ntb_plx_softc *sc = device_get_softc(dev); struct ntb_plx_mw_info *mw; unsigned sp; mw_idx = ntb_plx_user_mw_to_idx(sc, mw_idx, &sp); if (mw_idx >= sc->mw_count) return (EINVAL); mw = &sc->mw_info[mw_idx]; *mode = mw->splits[sp].mw_map_mode; return (0); } static int ntb_plx_mw_set_wc(device_t dev, unsigned mw_idx, vm_memattr_t mode) { struct ntb_plx_softc *sc = device_get_softc(dev); struct ntb_plx_mw_info *mw; uint64_t off, ss; int rc; unsigned sp, split; mw_idx = ntb_plx_user_mw_to_idx(sc, mw_idx, &sp); if (mw_idx >= sc->mw_count) return (EINVAL); mw = &sc->mw_info[mw_idx]; if (mw->splits[sp].mw_map_mode == mode) return (0); off = 0; if (mw_idx == sc->b2b_mw) { KASSERT(sc->b2b_off != 0, ("user shouldn't get non-shared b2b mw")); off = sc->b2b_off; } split = (mw->mw_bar == 2) ? sc->split : 0; ss = (mw->mw_size - off) >> split; rc = pmap_change_attr((vm_offset_t)mw->mw_vbase + off + ss * sp, ss, mode); if (rc == 0) mw->splits[sp].mw_map_mode = mode; return (rc); } static uint8_t ntb_plx_spad_count(device_t dev) { struct ntb_plx_softc *sc = device_get_softc(dev); return (sc->spad_count1 + sc->spad_count2); } static int ntb_plx_spad_write(device_t dev, unsigned int idx, uint32_t val) { struct ntb_plx_softc *sc = device_get_softc(dev); u_int off; if (idx >= sc->spad_count1 + sc->spad_count2) return (EINVAL); if (idx < sc->spad_count1) off = sc->spad_off1 + idx * 4; else off = sc->spad_off2 + (idx - sc->spad_count1) * 4; bus_write_4(sc->conf_res, off, val); return (0); } static void ntb_plx_spad_clear(device_t dev) { struct ntb_plx_softc *sc = device_get_softc(dev); int i; for (i = 0; i < sc->spad_count1 + sc->spad_count2; i++) ntb_plx_spad_write(dev, i, 0); } static int ntb_plx_spad_read(device_t dev, unsigned int idx, uint32_t *val) { struct ntb_plx_softc *sc = device_get_softc(dev); u_int off; if (idx >= sc->spad_count1 + sc->spad_count2) return (EINVAL); if (idx < sc->spad_count1) off = sc->spad_off1 + idx * 4; else off = sc->spad_off2 + (idx - sc->spad_count1) * 4; *val = bus_read_4(sc->conf_res, off); return (0); } static int ntb_plx_peer_spad_write(device_t dev, unsigned int idx, uint32_t val) { struct ntb_plx_softc *sc = device_get_softc(dev); u_int off; if (idx >= sc->spad_count1 + sc->spad_count2) return (EINVAL); if (idx < sc->spad_count1) off = sc->spad_offp1 + idx * 4; else off = sc->spad_offp2 + (idx - sc->spad_count1) * 4; if (sc->b2b_mw >= 0) bus_write_4(sc->mw_info[sc->b2b_mw].mw_res, off, val); else bus_write_4(sc->conf_res, off, val); return (0); } static int ntb_plx_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val) { struct ntb_plx_softc *sc = device_get_softc(dev); u_int off; 
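/* Mirror of ntb_plx_peer_spad_write() above: peer scratchpads are reached through the B2B memory window when present, otherwise through our own config space at the peer offsets computed at attach time. */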
if (idx >= sc->spad_count1 + sc->spad_count2) return (EINVAL); if (idx < sc->spad_count1) off = sc->spad_offp1 + idx * 4; else off = sc->spad_offp2 + (idx - sc->spad_count1) * 4; if (sc->b2b_mw >= 0) *val = bus_read_4(sc->mw_info[sc->b2b_mw].mw_res, off); else *val = bus_read_4(sc->conf_res, off); return (0); } static uint64_t ntb_plx_db_valid_mask(device_t dev) { return ((1LL << PLX_NUM_DB) - 1); } static int ntb_plx_db_vector_count(device_t dev) { return (1); } static uint64_t ntb_plx_db_vector_mask(device_t dev, uint32_t vector) { if (vector > 0) return (0); return ((1LL << PLX_NUM_DB) - 1); } static void ntb_plx_db_clear(device_t dev, uint64_t bits) { struct ntb_plx_softc *sc = device_get_softc(dev); NTX_WRITE(sc, sc->link ? 0xc60 : 0xc50, bits); } static void ntb_plx_db_clear_mask(device_t dev, uint64_t bits) { struct ntb_plx_softc *sc = device_get_softc(dev); NTX_WRITE(sc, sc->link ? 0xc68 : 0xc58, bits); } static uint64_t ntb_plx_db_read(device_t dev) { struct ntb_plx_softc *sc = device_get_softc(dev); return (NTX_READ(sc, sc->link ? 0xc5c : 0xc4c)); } static void ntb_plx_db_set_mask(device_t dev, uint64_t bits) { struct ntb_plx_softc *sc = device_get_softc(dev); NTX_WRITE(sc, sc->link ? 0xc64 : 0xc54, bits); } static int ntb_plx_peer_db_addr(device_t dev, bus_addr_t *db_addr, vm_size_t *db_size) { struct ntb_plx_softc *sc = device_get_softc(dev); struct ntb_plx_mw_info *mw; KASSERT((db_addr != NULL && db_size != NULL), ("must be non-NULL")); if (sc->b2b_mw >= 0) { mw = &sc->mw_info[sc->b2b_mw]; *db_addr = (uint64_t)mw->mw_pbase + PLX_NTX_BASE(sc) + 0xc4c; } else { *db_addr = rman_get_start(sc->conf_res) + PLX_NTX_BASE(sc); *db_addr += sc->link ? 0xc4c : 0xc5c; } *db_size = 4; return (0); } static void ntb_plx_peer_db_set(device_t dev, uint64_t bit) { struct ntb_plx_softc *sc = device_get_softc(dev); if (sc->b2b_mw >= 0) BNTX_WRITE(sc, 0xc4c, bit); else NTX_WRITE(sc, sc->link ? 
0xc4c : 0xc5c, bit); } static device_method_t ntb_plx_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ntb_plx_probe), DEVMETHOD(device_attach, ntb_plx_attach), DEVMETHOD(device_detach, ntb_plx_detach), /* Bus interface */ - DEVMETHOD(bus_child_location_str, ntb_child_location_str), + DEVMETHOD(bus_child_location, ntb_child_location), DEVMETHOD(bus_print_child, ntb_print_child), DEVMETHOD(bus_get_dma_tag, ntb_get_dma_tag), /* NTB interface */ DEVMETHOD(ntb_port_number, ntb_plx_port_number), DEVMETHOD(ntb_peer_port_count, ntb_plx_peer_port_count), DEVMETHOD(ntb_peer_port_number, ntb_plx_peer_port_number), DEVMETHOD(ntb_peer_port_idx, ntb_plx_peer_port_idx), DEVMETHOD(ntb_link_is_up, ntb_plx_link_is_up), DEVMETHOD(ntb_link_enable, ntb_plx_link_enable), DEVMETHOD(ntb_link_disable, ntb_plx_link_disable), DEVMETHOD(ntb_link_enabled, ntb_plx_link_enabled), DEVMETHOD(ntb_mw_count, ntb_plx_mw_count), DEVMETHOD(ntb_mw_get_range, ntb_plx_mw_get_range), DEVMETHOD(ntb_mw_set_trans, ntb_plx_mw_set_trans), DEVMETHOD(ntb_mw_clear_trans, ntb_plx_mw_clear_trans), DEVMETHOD(ntb_mw_get_wc, ntb_plx_mw_get_wc), DEVMETHOD(ntb_mw_set_wc, ntb_plx_mw_set_wc), DEVMETHOD(ntb_spad_count, ntb_plx_spad_count), DEVMETHOD(ntb_spad_clear, ntb_plx_spad_clear), DEVMETHOD(ntb_spad_write, ntb_plx_spad_write), DEVMETHOD(ntb_spad_read, ntb_plx_spad_read), DEVMETHOD(ntb_peer_spad_write, ntb_plx_peer_spad_write), DEVMETHOD(ntb_peer_spad_read, ntb_plx_peer_spad_read), DEVMETHOD(ntb_db_valid_mask, ntb_plx_db_valid_mask), DEVMETHOD(ntb_db_vector_count, ntb_plx_db_vector_count), DEVMETHOD(ntb_db_vector_mask, ntb_plx_db_vector_mask), DEVMETHOD(ntb_db_clear, ntb_plx_db_clear), DEVMETHOD(ntb_db_clear_mask, ntb_plx_db_clear_mask), DEVMETHOD(ntb_db_read, ntb_plx_db_read), DEVMETHOD(ntb_db_set_mask, ntb_plx_db_set_mask), DEVMETHOD(ntb_peer_db_addr, ntb_plx_peer_db_addr), DEVMETHOD(ntb_peer_db_set, ntb_plx_peer_db_set), DEVMETHOD_END }; static DEFINE_CLASS_0(ntb_hw, ntb_plx_driver, ntb_plx_methods, sizeof(struct ntb_plx_softc)); DRIVER_MODULE(ntb_hw_plx, pci, ntb_plx_driver, ntb_hw_devclass, NULL, NULL); MODULE_DEPEND(ntb_hw_plx, ntb, 1, 1, 1); MODULE_VERSION(ntb_hw_plx, 1); diff --git a/sys/dev/ntb/ntb_transport.c b/sys/dev/ntb/ntb_transport.c index d6bccfb0f08c..90420db0b9aa 100644 --- a/sys/dev/ntb/ntb_transport.c +++ b/sys/dev/ntb/ntb_transport.c @@ -1,1698 +1,1698 @@ /*- * Copyright (c) 2016-2017 Alexander Motin * Copyright (C) 2013 Intel Corporation * Copyright (C) 2015 EMC Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * The Non-Transparent Bridge (NTB) is a device that allows you to connect * two or more systems using PCI-e links, providing remote memory access. * * This module contains a transport for sending and receiving messages by * writing to remote memory window(s) provided by the underlying NTB device. * * NOTE: Much of the code in this module is shared with Linux. Any patches may * be picked up and redistributed in Linux with a dual GPL/BSD license. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include "ntb.h" #include "ntb_transport.h" #define KTR_NTB KTR_SPARE3 #define NTB_TRANSPORT_VERSION 4 static SYSCTL_NODE(_hw, OID_AUTO, ntb_transport, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "ntb_transport"); static unsigned g_ntb_transport_debug_level; SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ntb_transport_debug_level, 0, "ntb_transport log level -- higher is more verbose"); #define ntb_printf(lvl, ...) do { \ if ((lvl) <= g_ntb_transport_debug_level) { \ printf(__VA_ARGS__); \ } \ } while (0) static unsigned transport_mtu = 0x10000; static uint64_t max_mw_size = 256*1024*1024; SYSCTL_UQUAD(_hw_ntb_transport, OID_AUTO, max_mw_size, CTLFLAG_RDTUN, &max_mw_size, 0, "If enabled (non-zero), limit the size of large memory windows. " "Both sides of the NTB MUST set the same value here."); static unsigned enable_xeon_watchdog; SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, enable_xeon_watchdog, CTLFLAG_RDTUN, &enable_xeon_watchdog, 0, "If non-zero, write a register every second to " "keep a watchdog from tearing down the NTB link"); STAILQ_HEAD(ntb_queue_list, ntb_queue_entry); typedef uint32_t ntb_q_idx_t; struct ntb_queue_entry { /* ntb_queue list reference */ STAILQ_ENTRY(ntb_queue_entry) entry; /* info on data to be transferred */ void *cb_data; void *buf; uint32_t len; uint32_t flags; struct ntb_transport_qp *qp; struct ntb_payload_header *x_hdr; ntb_q_idx_t index; }; struct ntb_rx_info { ntb_q_idx_t entry; }; struct ntb_transport_qp { struct ntb_transport_ctx *transport; device_t dev; void *cb_data; bool client_ready; volatile bool link_is_up; uint8_t qp_num; /* Only 64 QPs are allowed.
0-63 */ struct ntb_rx_info *rx_info; struct ntb_rx_info *remote_rx_info; void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); struct ntb_queue_list tx_free_q; struct mtx ntb_tx_free_q_lock; caddr_t tx_mw; bus_addr_t tx_mw_phys; ntb_q_idx_t tx_index; ntb_q_idx_t tx_max_entry; uint64_t tx_max_frame; void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); struct ntb_queue_list rx_post_q; struct ntb_queue_list rx_pend_q; /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */ struct mtx ntb_rx_q_lock; struct task rxc_db_work; struct taskqueue *rxc_tq; caddr_t rx_buff; ntb_q_idx_t rx_index; ntb_q_idx_t rx_max_entry; uint64_t rx_max_frame; void (*event_handler)(void *data, enum ntb_link_event status); struct callout link_work; struct callout rx_full; uint64_t last_rx_no_buf; /* Stats */ uint64_t rx_bytes; uint64_t rx_pkts; uint64_t rx_ring_empty; uint64_t rx_err_no_buf; uint64_t rx_err_oflow; uint64_t rx_err_ver; uint64_t tx_bytes; uint64_t tx_pkts; uint64_t tx_ring_full; uint64_t tx_err_no_buf; struct mtx tx_lock; }; struct ntb_transport_mw { vm_paddr_t phys_addr; size_t phys_size; size_t xlat_align; size_t xlat_align_size; bus_addr_t addr_limit; /* Tx buff is vbase / phys_addr / tx_size */ caddr_t vbase; size_t tx_size; /* Rx buff is virt_addr / dma_addr / rx_size */ bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; caddr_t virt_addr; bus_addr_t dma_addr; size_t rx_size; /* rx_size rounded up to the size alignment requirements of the hardware. */ size_t buff_size; }; struct ntb_transport_child { device_t dev; int consumer; int qpoff; int qpcnt; struct ntb_transport_child *next; }; struct ntb_transport_ctx { device_t dev; struct ntb_transport_child *child; struct ntb_transport_mw *mw_vec; struct ntb_transport_qp *qp_vec; int compact; unsigned mw_count; unsigned qp_count; uint64_t qp_bitmap; volatile bool link_is_up; enum ntb_speed link_speed; enum ntb_width link_width; struct callout link_work; struct callout link_watchdog; struct task link_cleanup; }; enum { NTBT_DESC_DONE_FLAG = 1 << 0, NTBT_LINK_DOWN_FLAG = 1 << 1, }; struct ntb_payload_header { ntb_q_idx_t ver; uint32_t len; uint32_t flags; }; enum { /* * The order of this enum is part of the remote protocol. Do not * reorder without bumping the protocol version (and it's probably best * to keep the protocol in lock-step with the Linux NTB driver). */ NTBT_VERSION = 0, NTBT_QP_LINKS, NTBT_NUM_QPS, NTBT_NUM_MWS, /* * N.B.: transport_link_work assumes MW1 enums = MW0 + 2. */ NTBT_MW0_SZ_HIGH, NTBT_MW0_SZ_LOW, NTBT_MW1_SZ_HIGH, NTBT_MW1_SZ_LOW, /* * Some NTB-using hardware has a watchdog to work around NTB hangs; if * a register or doorbell isn't written every few seconds, the link is * torn down. Write an otherwise unused register every few seconds to * work around this watchdog. */ NTBT_WATCHDOG_SPAD = 15 }; /* * Compact version of the scratchpad protocol, using half as many registers. */ enum { NTBTC_PARAMS = 0, /* NUM_QPS << 24 + NUM_MWS << 16 + VERSION */ NTBTC_QP_LINKS, /* QP links status */ NTBTC_MW0_SZ, /* MW size limited to 32 bits.
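* As a worked example of the compact packing (values illustrative): 4 QPs, 2 MWs and protocol version 4 go out in NTBTC_PARAMS as (4 << 24) | (2 << 16) | 4 == 0x04020004, and ntb_transport_link_work() requires the peer's word to match exactly.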
*/ }; #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) #define NTB_QP_DEF_NUM_ENTRIES 100 #define NTB_LINK_DOWN_TIMEOUT 100 static int ntb_transport_probe(device_t dev); static int ntb_transport_attach(device_t dev); static int ntb_transport_detach(device_t dev); static void ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num); static int ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry); static void ntb_transport_rxc_db(void *arg, int pending); static int ntb_process_rxc(struct ntb_transport_qp *qp); static void ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset); static inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data); static void ntb_complete_rxc(struct ntb_transport_qp *qp); static void ntb_transport_doorbell_callback(void *data, uint32_t vector); static void ntb_transport_event_callback(void *data); static void ntb_transport_link_work(void *arg); static int ntb_set_mw(struct ntb_transport_ctx *, int num_mw, size_t size); static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw); static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num); static void ntb_qp_link_work(void *arg); static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt); static void ntb_transport_link_cleanup_work(void *, int); static void ntb_qp_link_down(struct ntb_transport_qp *qp); static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp); static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp); static void ntb_send_link_down(struct ntb_transport_qp *qp); static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, struct ntb_queue_list *list); static struct ntb_queue_entry *ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list); static struct ntb_queue_entry *ntb_list_mv(struct mtx *lock, struct ntb_queue_list *from, struct ntb_queue_list *to); static void xeon_link_watchdog_hb(void *); static const struct ntb_ctx_ops ntb_transport_ops = { .link_event = ntb_transport_event_callback, .db_event = ntb_transport_doorbell_callback, }; MALLOC_DEFINE(M_NTB_T, "ntb_transport", "ntb transport driver"); static inline void iowrite32(uint32_t val, void *addr) { bus_space_write_4(X86_BUS_SPACE_MEM, 0/* HACK */, (uintptr_t)addr, val); } /* Transport Init and teardown */ static void xeon_link_watchdog_hb(void *arg) { struct ntb_transport_ctx *nt; nt = arg; ntb_spad_write(nt->dev, NTBT_WATCHDOG_SPAD, 0); callout_reset(&nt->link_watchdog, 1 * hz, xeon_link_watchdog_hb, nt); } static int ntb_transport_probe(device_t dev) { device_set_desc(dev, "NTB Transport"); return (0); } static int ntb_transport_attach(device_t dev) { struct ntb_transport_ctx *nt = device_get_softc(dev); struct ntb_transport_child **cpp = &nt->child; struct ntb_transport_child *nc; struct ntb_transport_mw *mw; uint64_t db_bitmap; int rc, i, db_count, spad_count, qp, qpu, qpo, qpt; char cfg[128] = ""; char buf[32]; char *n, *np, *c, *name; nt->dev = dev; nt->mw_count = ntb_mw_count(dev); spad_count = ntb_spad_count(dev); db_bitmap = ntb_db_valid_mask(dev); db_count = flsll(db_bitmap); KASSERT(db_bitmap == ((uint64_t)1 << db_count) - 1, ("Doorbells are not sequential (%jx).\n", db_bitmap)); if (nt->mw_count == 0) { device_printf(dev, "At least 1 memory window required.\n"); return (ENXIO); } nt->compact = (spad_count < 4 + 2 * nt->mw_count); snprintf(buf, sizeof(buf), "hint.%s.%d.compact", device_get_name(dev), device_get_unit(dev)); TUNABLE_INT_FETCH(buf, &nt->compact); if 
(nt->compact) { if (spad_count < 3) { device_printf(dev, "At least 3 scratchpads required.\n"); return (ENXIO); } if (spad_count < 2 + nt->mw_count) { nt->mw_count = spad_count - 2; device_printf(dev, "Scratchpads enough only for %d " "memory windows.\n", nt->mw_count); } } else { if (spad_count < 6) { device_printf(dev, "At least 6 scratchpads required.\n"); return (ENXIO); } if (spad_count < 4 + 2 * nt->mw_count) { nt->mw_count = (spad_count - 4) / 2; device_printf(dev, "Scratchpads enough only for %d " "memory windows.\n", nt->mw_count); } } if (db_bitmap == 0) { device_printf(dev, "At least one doorbell required.\n"); return (ENXIO); } nt->mw_vec = malloc(nt->mw_count * sizeof(*nt->mw_vec), M_NTB_T, M_WAITOK | M_ZERO); for (i = 0; i < nt->mw_count; i++) { mw = &nt->mw_vec[i]; rc = ntb_mw_get_range(dev, i, &mw->phys_addr, &mw->vbase, &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size, &mw->addr_limit); if (rc != 0) goto err; mw->tx_size = mw->phys_size; if (max_mw_size != 0 && mw->tx_size > max_mw_size) { device_printf(dev, "Memory window %d limited from " "%ju to %ju\n", i, (uintmax_t)mw->tx_size, max_mw_size); mw->tx_size = max_mw_size; } if (nt->compact && mw->tx_size > UINT32_MAX) { device_printf(dev, "Memory window %d is too big " "(%ju)\n", i, (uintmax_t)mw->tx_size); rc = ENXIO; goto err; } mw->rx_size = 0; mw->buff_size = 0; mw->virt_addr = NULL; mw->dma_addr = 0; rc = ntb_mw_set_wc(dev, i, VM_MEMATTR_WRITE_COMBINING); if (rc) ntb_printf(0, "Unable to set mw%d caching\n", i); /* * Try to preallocate receive memory early, since there may * not be enough contiguous memory later. It is quite likely * that NTB windows are symmetric and this allocation will remain, * but even if not, we will just reallocate it later. */ ntb_set_mw(nt, i, mw->tx_size); } qpu = 0; qpo = imin(db_count, nt->mw_count); qpt = db_count; snprintf(buf, sizeof(buf), "hint.%s.%d.config", device_get_name(dev), device_get_unit(dev)); TUNABLE_STR_FETCH(buf, cfg, sizeof(cfg)); n = cfg; i = 0; while ((c = strsep(&n, ",")) != NULL) { np = c; name = strsep(&np, ":"); if (name != NULL && name[0] == 0) name = NULL; qp = (np && np[0] != 0) ?
strtol(np, NULL, 10) : qpo - qpu; if (qp <= 0) qp = 1; if (qp > qpt - qpu) { device_printf(dev, "Not enough resources for config\n"); break; } nc = malloc(sizeof(*nc), M_DEVBUF, M_WAITOK | M_ZERO); nc->consumer = i; nc->qpoff = qpu; nc->qpcnt = qp; nc->dev = device_add_child(dev, name, -1); if (nc->dev == NULL) { device_printf(dev, "Can not add child.\n"); break; } device_set_ivars(nc->dev, nc); *cpp = nc; cpp = &nc->next; if (bootverbose) { device_printf(dev, "%d \"%s\": queues %d", i, name, qpu); if (qp > 1) printf("-%d", qpu + qp - 1); printf("\n"); } qpu += qp; i++; } nt->qp_count = qpu; nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_T, M_WAITOK | M_ZERO); for (i = 0; i < nt->qp_count; i++) ntb_transport_init_queue(nt, i); callout_init(&nt->link_work, 0); callout_init(&nt->link_watchdog, 0); TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt); nt->link_is_up = false; rc = ntb_set_ctx(dev, nt, &ntb_transport_ops); if (rc != 0) goto err; ntb_link_enable(dev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); for (i = 0; i < nt->mw_count; i++) { mw = &nt->mw_vec[i]; rc = ntb_mw_set_trans(nt->dev, i, mw->dma_addr, mw->buff_size); if (rc != 0) ntb_printf(0, "load time mw%d xlat fails, rc %d\n", i, rc); } if (enable_xeon_watchdog != 0) callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt); bus_generic_attach(dev); return (0); err: free(nt->qp_vec, M_NTB_T); free(nt->mw_vec, M_NTB_T); return (rc); } static int ntb_transport_detach(device_t dev) { struct ntb_transport_ctx *nt = device_get_softc(dev); struct ntb_transport_child **cpp = &nt->child; struct ntb_transport_child *nc; int error = 0, i; while ((nc = *cpp) != NULL) { *cpp = (*cpp)->next; error = device_delete_child(dev, nc->dev); if (error) break; free(nc, M_DEVBUF); } KASSERT(nt->qp_bitmap == 0, ("Some queues not freed on detach (%jx)", nt->qp_bitmap)); ntb_transport_link_cleanup(nt); taskqueue_drain(taskqueue_swi, &nt->link_cleanup); callout_drain(&nt->link_work); callout_drain(&nt->link_watchdog); ntb_link_disable(dev); ntb_clear_ctx(dev); for (i = 0; i < nt->mw_count; i++) ntb_free_mw(nt, i); free(nt->qp_vec, M_NTB_T); free(nt->mw_vec, M_NTB_T); return (0); } static int ntb_transport_print_child(device_t dev, device_t child) { struct ntb_transport_child *nc = device_get_ivars(child); int retval; retval = bus_print_child_header(dev, child); if (nc->qpcnt > 0) { printf(" queue %d", nc->qpoff); if (nc->qpcnt > 1) printf("-%d", nc->qpoff + nc->qpcnt - 1); } retval += printf(" at consumer %d", nc->consumer); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static int -ntb_transport_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) +ntb_transport_child_location(device_t dev, device_t child, struct sbuf *sb) { struct ntb_transport_child *nc = device_get_ivars(child); - snprintf(buf, buflen, "consumer=%d", nc->consumer); + sbuf_printf(sb, "consumer=%d", nc->consumer); return (0); } int ntb_transport_queue_count(device_t dev) { struct ntb_transport_child *nc = device_get_ivars(dev); return (nc->qpcnt); } static void ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num) { struct ntb_transport_mw *mw; struct ntb_transport_qp *qp; vm_paddr_t mw_base; uint64_t qp_offset; size_t tx_size; unsigned num_qps_mw, mw_num, mw_count; mw_count = nt->mw_count; mw_num = QP_TO_MW(nt, qp_num); mw = &nt->mw_vec[mw_num]; qp = &nt->qp_vec[qp_num]; qp->qp_num = qp_num; qp->transport = nt; qp->dev = nt->dev; qp->client_ready = 
false; qp->event_handler = NULL; ntb_qp_link_down_reset(qp); if (mw_num < nt->qp_count % mw_count) num_qps_mw = nt->qp_count / mw_count + 1; else num_qps_mw = nt->qp_count / mw_count; mw_base = mw->phys_addr; tx_size = mw->tx_size / num_qps_mw; qp_offset = tx_size * (qp_num / mw_count); qp->tx_mw = mw->vbase + qp_offset; KASSERT(qp->tx_mw != NULL, ("uh oh?")); /* XXX Assumes that a vm_paddr_t is equivalent to bus_addr_t */ qp->tx_mw_phys = mw_base + qp_offset; KASSERT(qp->tx_mw_phys != 0, ("uh oh?")); tx_size -= sizeof(struct ntb_rx_info); qp->rx_info = (void *)(qp->tx_mw + tx_size); /* Due to house-keeping, there must be at least 2 buffs */ qp->tx_max_frame = qmin(transport_mtu, tx_size / 2); qp->tx_max_entry = tx_size / qp->tx_max_frame; callout_init(&qp->link_work, 0); callout_init(&qp->rx_full, 1); mtx_init(&qp->ntb_rx_q_lock, "ntb rx q", NULL, MTX_SPIN); mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN); mtx_init(&qp->tx_lock, "ntb transport tx", NULL, MTX_DEF); TASK_INIT(&qp->rxc_db_work, 0, ntb_transport_rxc_db, qp); qp->rxc_tq = taskqueue_create("ntbt_rx", M_WAITOK, taskqueue_thread_enqueue, &qp->rxc_tq); taskqueue_start_threads(&qp->rxc_tq, 1, PI_NET, "%s rx%d", device_get_nameunit(nt->dev), qp_num); STAILQ_INIT(&qp->rx_post_q); STAILQ_INIT(&qp->rx_pend_q); STAILQ_INIT(&qp->tx_free_q); } void ntb_transport_free_queue(struct ntb_transport_qp *qp) { struct ntb_transport_ctx *nt = qp->transport; struct ntb_queue_entry *entry; callout_drain(&qp->link_work); ntb_db_set_mask(qp->dev, 1ull << qp->qp_num); taskqueue_drain_all(qp->rxc_tq); taskqueue_free(qp->rxc_tq); qp->cb_data = NULL; qp->rx_handler = NULL; qp->tx_handler = NULL; qp->event_handler = NULL; while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) free(entry, M_NTB_T); while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) free(entry, M_NTB_T); while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) free(entry, M_NTB_T); nt->qp_bitmap &= ~(1 << qp->qp_num); } /** * ntb_transport_create_queue - Create a new NTB transport layer queue * @rx_handler: receive callback function * @tx_handler: transmit callback function * @event_handler: event callback function * * Create a new NTB transport layer queue and provide the queue with a callback * routine for both transmit and receive. The receive callback routine will be * used to pass up data when the transport has received it on the queue. The * transmit callback routine will be called when the transport has completed the * transmission of the data on the queue and the data is ready to be freed. * * RETURNS: pointer to newly created ntb_queue, NULL on error. 
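 *
 * A minimal usage sketch (hypothetical consumer; the my_* handlers and
 * softc pointer are illustrative, not part of this driver):
 *
 *	static const struct ntb_queue_handlers handlers = {
 *		.rx_handler = my_rx,
 *		.tx_handler = my_tx,
 *		.event_handler = my_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(dev, 0, &handlers, softc);
 *	if (qp != NULL)
 *		ntb_transport_link_up(qp);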
*/ struct ntb_transport_qp * ntb_transport_create_queue(device_t dev, int q, const struct ntb_queue_handlers *handlers, void *data) { struct ntb_transport_child *nc = device_get_ivars(dev); struct ntb_transport_ctx *nt = device_get_softc(device_get_parent(dev)); struct ntb_queue_entry *entry; struct ntb_transport_qp *qp; int i; if (q < 0 || q >= nc->qpcnt) return (NULL); qp = &nt->qp_vec[nc->qpoff + q]; nt->qp_bitmap |= (1 << qp->qp_num); qp->cb_data = data; qp->rx_handler = handlers->rx_handler; qp->tx_handler = handlers->tx_handler; qp->event_handler = handlers->event_handler; for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO); entry->cb_data = data; entry->buf = NULL; entry->len = transport_mtu; entry->qp = qp; ntb_list_add(&qp->ntb_rx_q_lock, entry, &qp->rx_pend_q); } for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO); entry->qp = qp; ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); } ntb_db_clear(dev, 1ull << qp->qp_num); return (qp); } /** * ntb_transport_link_up - Notify NTB transport of client readiness to use queue * @qp: NTB transport layer queue to be enabled * * Notify NTB transport layer of client readiness to use queue */ void ntb_transport_link_up(struct ntb_transport_qp *qp) { struct ntb_transport_ctx *nt = qp->transport; qp->client_ready = true; ntb_printf(2, "qp %d client ready\n", qp->qp_num); if (nt->link_is_up) callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); } /* Transport Tx */ /** * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry * @qp: NTB transport layer queue the entry is to be enqueued on * @cb: per buffer pointer for callback function to use * @data: pointer to data buffer that will be sent * @len: length of the data buffer * * Enqueue a new transmit buffer onto the transport queue from which an NTB * payload will be transmitted. This assumes that a lock is being held to * serialize access to the qp. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len) { struct ntb_queue_entry *entry; int rc; if (!qp->link_is_up || len == 0) { CTR0(KTR_NTB, "TX: link not up"); return (EINVAL); } entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); if (entry == NULL) { CTR0(KTR_NTB, "TX: could not get entry from tx_free_q"); qp->tx_err_no_buf++; return (EBUSY); } CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry); entry->cb_data = cb; entry->buf = data; entry->len = len; entry->flags = 0; mtx_lock(&qp->tx_lock); rc = ntb_process_tx(qp, entry); mtx_unlock(&qp->tx_lock); if (rc != 0) { ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); CTR1(KTR_NTB, "TX: process_tx failed. Returning entry %p to tx_free_q", entry); } return (rc); } static void ntb_tx_copy_callback(void *data) { struct ntb_queue_entry *entry = data; struct ntb_transport_qp *qp = entry->qp; struct ntb_payload_header *hdr = entry->x_hdr; iowrite32(entry->flags | NTBT_DESC_DONE_FLAG, &hdr->flags); CTR1(KTR_NTB, "TX: hdr %p set DESC_DONE", hdr); ntb_peer_db_set(qp->dev, 1ull << qp->qp_num); /* * The entry length can only be zero if the packet is intended to be a * "link down" or similar. Since no payload is being sent in these * cases, there is nothing to add to the completion queue.
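* (Such zero-length descriptors originate in ntb_send_link_down(), which queues an entry with len == 0 and NTBT_LINK_DOWN_FLAG set; the receiver reacts in ntb_process_rxc() by calling ntb_qp_link_down().)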
*/ if (entry->len > 0) { qp->tx_bytes += entry->len; if (qp->tx_handler) qp->tx_handler(qp, qp->cb_data, entry->buf, entry->len); else m_freem(entry->buf); entry->buf = NULL; } CTR3(KTR_NTB, "TX: entry %p sent. hdr->ver = %u, hdr->flags = 0x%x, Returning " "to tx_free_q", entry, hdr->ver, hdr->flags); ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); } static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void *offset) { CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset); if (entry->buf != NULL) { m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset); /* * Ensure that the data is fully copied before setting the * flags */ wmb(); } ntb_tx_copy_callback(entry); } static void ntb_async_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) { struct ntb_payload_header *hdr; void *offset; offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame - sizeof(struct ntb_payload_header)); entry->x_hdr = hdr; iowrite32(entry->len, &hdr->len); iowrite32(qp->tx_pkts, &hdr->ver); ntb_memcpy_tx(entry, offset); } static int ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) { CTR3(KTR_NTB, "TX: process_tx: tx_pkts=%lu, tx_index=%u, remote entry=%u", qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry); if (qp->tx_index == qp->remote_rx_info->entry) { CTR0(KTR_NTB, "TX: ring full"); qp->tx_ring_full++; return (EAGAIN); } if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { if (qp->tx_handler != NULL) qp->tx_handler(qp, qp->cb_data, entry->buf, EIO); else m_freem(entry->buf); entry->buf = NULL; ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); CTR1(KTR_NTB, "TX: frame too big. returning entry %p to tx_free_q", entry); return (0); } CTR2(KTR_NTB, "TX: copying entry %p to index %u", entry, qp->tx_index); ntb_async_tx(qp, entry); qp->tx_index++; qp->tx_index %= qp->tx_max_entry; qp->tx_pkts++; return (0); } /* Transport Rx */ static void ntb_transport_rxc_db(void *arg, int pending __unused) { struct ntb_transport_qp *qp = arg; uint64_t qp_mask = 1ull << qp->qp_num; int rc; CTR0(KTR_NTB, "RX: transport_rx"); again: while ((rc = ntb_process_rxc(qp)) == 0) ; CTR1(KTR_NTB, "RX: process_rxc returned %d", rc); if ((ntb_db_read(qp->dev) & qp_mask) != 0) { /* If db is set, clear it and check queue once more. */ ntb_db_clear(qp->dev, qp_mask); goto again; } if (qp->link_is_up) ntb_db_clear_mask(qp->dev, qp_mask); } static int ntb_process_rxc(struct ntb_transport_qp *qp) { struct ntb_payload_header *hdr; struct ntb_queue_entry *entry; caddr_t offset; offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; hdr = (void *)(offset + qp->rx_max_frame - sizeof(struct ntb_payload_header)); CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index); if ((hdr->flags & NTBT_DESC_DONE_FLAG) == 0) { CTR0(KTR_NTB, "RX: hdr not done"); qp->rx_ring_empty++; return (EAGAIN); } if ((hdr->flags & NTBT_LINK_DOWN_FLAG) != 0) { CTR0(KTR_NTB, "RX: link down"); ntb_qp_link_down(qp); hdr->flags = 0; return (EAGAIN); } if (hdr->ver != (uint32_t)qp->rx_pkts) { CTR2(KTR_NTB,"RX: ver != rx_pkts (%x != %lx). 
" "Returning entry to rx_pend_q", hdr->ver, qp->rx_pkts); qp->rx_err_ver++; return (EIO); } entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); if (entry == NULL) { qp->rx_err_no_buf++; CTR0(KTR_NTB, "RX: No entries in rx_pend_q"); return (EAGAIN); } callout_stop(&qp->rx_full); CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry); entry->x_hdr = hdr; entry->index = qp->rx_index; if (hdr->len > entry->len) { CTR2(KTR_NTB, "RX: len too long. Wanted %ju got %ju", (uintmax_t)hdr->len, (uintmax_t)entry->len); qp->rx_err_oflow++; entry->len = -EIO; entry->flags |= NTBT_DESC_DONE_FLAG; ntb_complete_rxc(qp); } else { qp->rx_bytes += hdr->len; qp->rx_pkts++; CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts); entry->len = hdr->len; ntb_memcpy_rx(qp, entry, offset); } qp->rx_index++; qp->rx_index %= qp->rx_max_entry; return (0); } static void ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset) { struct ifnet *ifp = entry->cb_data; unsigned int len = entry->len; CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset); entry->buf = (void *)m_devget(offset, len, 0, ifp, NULL); if (entry->buf == NULL) entry->len = -ENOMEM; /* Ensure that the data is globally visible before clearing the flag */ wmb(); CTR2(KTR_NTB, "RX: copied entry %p to mbuf %p.", entry, entry->buf); ntb_rx_copy_callback(qp, entry); } static inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data) { struct ntb_queue_entry *entry; entry = data; entry->flags |= NTBT_DESC_DONE_FLAG; ntb_complete_rxc(qp); } static void ntb_complete_rxc(struct ntb_transport_qp *qp) { struct ntb_queue_entry *entry; struct mbuf *m; unsigned len; CTR0(KTR_NTB, "RX: rx_completion_task"); mtx_lock_spin(&qp->ntb_rx_q_lock); while (!STAILQ_EMPTY(&qp->rx_post_q)) { entry = STAILQ_FIRST(&qp->rx_post_q); if ((entry->flags & NTBT_DESC_DONE_FLAG) == 0) break; entry->x_hdr->flags = 0; iowrite32(entry->index, &qp->rx_info->entry); STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry); len = entry->len; m = entry->buf; /* * Re-initialize queue_entry for reuse; rx_handler takes * ownership of the mbuf. 
*/ entry->buf = NULL; entry->len = transport_mtu; entry->cb_data = qp->cb_data; STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry); mtx_unlock_spin(&qp->ntb_rx_q_lock); CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m); if (qp->rx_handler != NULL && qp->client_ready) qp->rx_handler(qp, qp->cb_data, m, len); else m_freem(m); mtx_lock_spin(&qp->ntb_rx_q_lock); } mtx_unlock_spin(&qp->ntb_rx_q_lock); } static void ntb_transport_doorbell_callback(void *data, uint32_t vector) { struct ntb_transport_ctx *nt = data; struct ntb_transport_qp *qp; uint64_t vec_mask; unsigned qp_num; vec_mask = ntb_db_vector_mask(nt->dev, vector); vec_mask &= nt->qp_bitmap; if ((vec_mask & (vec_mask - 1)) != 0) vec_mask &= ntb_db_read(nt->dev); if (vec_mask != 0) { ntb_db_set_mask(nt->dev, vec_mask); ntb_db_clear(nt->dev, vec_mask); } while (vec_mask != 0) { qp_num = ffsll(vec_mask) - 1; qp = &nt->qp_vec[qp_num]; if (qp->link_is_up) taskqueue_enqueue(qp->rxc_tq, &qp->rxc_db_work); vec_mask &= ~(1ull << qp_num); } } /* Link Event handler */ static void ntb_transport_event_callback(void *data) { struct ntb_transport_ctx *nt = data; if (ntb_link_is_up(nt->dev, &nt->link_speed, &nt->link_width)) { ntb_printf(1, "HW link up\n"); callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); } else { ntb_printf(1, "HW link down\n"); taskqueue_enqueue(taskqueue_swi, &nt->link_cleanup); } } /* Link bring up */ static void ntb_transport_link_work(void *arg) { struct ntb_transport_ctx *nt = arg; struct ntb_transport_mw *mw; device_t dev = nt->dev; struct ntb_transport_qp *qp; uint64_t val64, size; uint32_t val; unsigned i; int rc; /* send the local info, in the opposite order of the way we read it */ if (nt->compact) { for (i = 0; i < nt->mw_count; i++) { size = nt->mw_vec[i].tx_size; KASSERT(size <= UINT32_MAX, ("size too big (%jx)", size)); ntb_peer_spad_write(dev, NTBTC_MW0_SZ + i, size); } ntb_peer_spad_write(dev, NTBTC_QP_LINKS, 0); ntb_peer_spad_write(dev, NTBTC_PARAMS, (nt->qp_count << 24) | (nt->mw_count << 16) | NTB_TRANSPORT_VERSION); } else { for (i = 0; i < nt->mw_count; i++) { size = nt->mw_vec[i].tx_size; ntb_peer_spad_write(dev, NTBT_MW0_SZ_HIGH + (i * 2), size >> 32); ntb_peer_spad_write(dev, NTBT_MW0_SZ_LOW + (i * 2), size); } ntb_peer_spad_write(dev, NTBT_NUM_MWS, nt->mw_count); ntb_peer_spad_write(dev, NTBT_NUM_QPS, nt->qp_count); ntb_peer_spad_write(dev, NTBT_QP_LINKS, 0); ntb_peer_spad_write(dev, NTBT_VERSION, NTB_TRANSPORT_VERSION); } /* Query the remote side for its info */ val = 0; if (nt->compact) { ntb_spad_read(dev, NTBTC_PARAMS, &val); if (val != ((nt->qp_count << 24) | (nt->mw_count << 16) | NTB_TRANSPORT_VERSION)) goto out; } else { ntb_spad_read(dev, NTBT_VERSION, &val); if (val != NTB_TRANSPORT_VERSION) goto out; ntb_spad_read(dev, NTBT_NUM_QPS, &val); if (val != nt->qp_count) goto out; ntb_spad_read(dev, NTBT_NUM_MWS, &val); if (val != nt->mw_count) goto out; } for (i = 0; i < nt->mw_count; i++) { if (nt->compact) { ntb_spad_read(dev, NTBTC_MW0_SZ + i, &val); val64 = val; } else { ntb_spad_read(dev, NTBT_MW0_SZ_HIGH + (i * 2), &val); val64 = (uint64_t)val << 32; ntb_spad_read(dev, NTBT_MW0_SZ_LOW + (i * 2), &val); val64 |= val; } mw = &nt->mw_vec[i]; mw->rx_size = val64; val64 = roundup(val64, mw->xlat_align_size); if (mw->buff_size != val64) { rc = ntb_set_mw(nt, i, val64); if (rc != 0) { ntb_printf(0, "link up set mw%d fails, rc %d\n", i, rc); goto free_mws; } /* Notify HW the memory location of the receive buffer */ rc = ntb_mw_set_trans(nt->dev, i, mw->dma_addr, mw->buff_size); if 
(rc != 0) { ntb_printf(0, "link up mw%d xlat fails, rc %d\n", i, rc); goto free_mws; } } } nt->link_is_up = true; ntb_printf(1, "transport link up\n"); for (i = 0; i < nt->qp_count; i++) { qp = &nt->qp_vec[i]; ntb_transport_setup_qp_mw(nt, i); if (qp->client_ready) callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); } return; free_mws: for (i = 0; i < nt->mw_count; i++) ntb_free_mw(nt, i); out: if (ntb_link_is_up(dev, &nt->link_speed, &nt->link_width)) callout_reset(&nt->link_work, NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt); } struct ntb_load_cb_args { bus_addr_t addr; int error; }; static void ntb_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct ntb_load_cb_args *cba = (struct ntb_load_cb_args *)xsc; if (!(cba->error = error)) cba->addr = segs[0].ds_addr; } static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, size_t size) { struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; struct ntb_load_cb_args cba; size_t buff_size; if (size == 0) return (EINVAL); buff_size = roundup(size, mw->xlat_align_size); /* No need to re-setup */ if (mw->buff_size == buff_size) return (0); if (mw->buff_size != 0) ntb_free_mw(nt, num_mw); /* Alloc memory for receiving data. Must be aligned */ mw->buff_size = buff_size; if (bus_dma_tag_create(bus_get_dma_tag(nt->dev), mw->xlat_align, 0, mw->addr_limit, BUS_SPACE_MAXADDR, NULL, NULL, mw->buff_size, 1, mw->buff_size, 0, NULL, NULL, &mw->dma_tag)) { ntb_printf(0, "Unable to create MW tag of size %zu\n", mw->buff_size); mw->buff_size = 0; return (ENOMEM); } if (bus_dmamem_alloc(mw->dma_tag, (void **)&mw->virt_addr, BUS_DMA_WAITOK | BUS_DMA_ZERO, &mw->dma_map)) { bus_dma_tag_destroy(mw->dma_tag); ntb_printf(0, "Unable to allocate MW buffer of size %zu\n", mw->buff_size); mw->buff_size = 0; return (ENOMEM); } if (bus_dmamap_load(mw->dma_tag, mw->dma_map, mw->virt_addr, mw->buff_size, ntb_load_cb, &cba, BUS_DMA_NOWAIT) || cba.error) { bus_dmamem_free(mw->dma_tag, mw->virt_addr, mw->dma_map); bus_dma_tag_destroy(mw->dma_tag); ntb_printf(0, "Unable to load MW buffer of size %zu\n", mw->buff_size); mw->buff_size = 0; return (ENOMEM); } mw->dma_addr = cba.addr; return (0); } static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) { struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; if (mw->virt_addr == NULL) return; ntb_mw_clear_trans(nt->dev, num_mw); bus_dmamap_unload(mw->dma_tag, mw->dma_map); bus_dmamem_free(mw->dma_tag, mw->virt_addr, mw->dma_map); bus_dma_tag_destroy(mw->dma_tag); mw->buff_size = 0; mw->virt_addr = NULL; } static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num) { struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; struct ntb_transport_mw *mw; void *offset; ntb_q_idx_t i; size_t rx_size; unsigned num_qps_mw, mw_num, mw_count; mw_count = nt->mw_count; mw_num = QP_TO_MW(nt, qp_num); mw = &nt->mw_vec[mw_num]; if (mw->virt_addr == NULL) return (ENOMEM); if (mw_num < nt->qp_count % mw_count) num_qps_mw = nt->qp_count / mw_count + 1; else num_qps_mw = nt->qp_count / mw_count; rx_size = mw->rx_size / num_qps_mw; qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count); rx_size -= sizeof(struct ntb_rx_info); qp->remote_rx_info = (void*)(qp->rx_buff + rx_size); /* Due to house-keeping, there must be at least 2 buffs */ qp->rx_max_frame = qmin(transport_mtu, rx_size / 2); qp->rx_max_entry = rx_size / qp->rx_max_frame; qp->rx_index = 0; qp->remote_rx_info->entry = qp->rx_max_entry - 1; /* Set up the hdr offsets with 0s */ for (i = 0; i < qp->rx_max_entry; 
i++) { offset = (void *)(qp->rx_buff + qp->rx_max_frame * (i + 1) - sizeof(struct ntb_payload_header)); memset(offset, 0, sizeof(struct ntb_payload_header)); } qp->rx_pkts = 0; qp->tx_pkts = 0; qp->tx_index = 0; return (0); } static void ntb_qp_link_work(void *arg) { struct ntb_transport_qp *qp = arg; device_t dev = qp->dev; struct ntb_transport_ctx *nt = qp->transport; int i; uint32_t val; /* Report queues that are up on our side */ for (i = 0, val = 0; i < nt->qp_count; i++) { if (nt->qp_vec[i].client_ready) val |= (1 << i); } ntb_peer_spad_write(dev, NTBT_QP_LINKS, val); /* See if the remote side is up */ ntb_spad_read(dev, NTBT_QP_LINKS, &val); if ((val & (1ull << qp->qp_num)) != 0) { ntb_printf(2, "qp %d link up\n", qp->qp_num); qp->link_is_up = true; if (qp->event_handler != NULL) qp->event_handler(qp->cb_data, NTB_LINK_UP); ntb_db_clear_mask(dev, 1ull << qp->qp_num); } else if (nt->link_is_up) callout_reset(&qp->link_work, NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp); } /* Link down event*/ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt) { struct ntb_transport_qp *qp; int i; callout_drain(&nt->link_work); nt->link_is_up = 0; /* Pass along the info to any clients */ for (i = 0; i < nt->qp_count; i++) { if ((nt->qp_bitmap & (1 << i)) != 0) { qp = &nt->qp_vec[i]; ntb_qp_link_cleanup(qp); callout_drain(&qp->link_work); } } /* * The scratchpad registers keep the values if the remote side * goes down, blast them now to give them a sane value the next * time they are accessed */ ntb_spad_clear(nt->dev); } static void ntb_transport_link_cleanup_work(void *arg, int pending __unused) { ntb_transport_link_cleanup(arg); } static void ntb_qp_link_down(struct ntb_transport_qp *qp) { ntb_qp_link_cleanup(qp); } static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) { qp->link_is_up = false; ntb_db_set_mask(qp->dev, 1ull << qp->qp_num); qp->tx_index = qp->rx_index = 0; qp->tx_bytes = qp->rx_bytes = 0; qp->tx_pkts = qp->rx_pkts = 0; qp->rx_ring_empty = 0; qp->tx_ring_full = 0; qp->rx_err_no_buf = qp->tx_err_no_buf = 0; qp->rx_err_oflow = qp->rx_err_ver = 0; } static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) { callout_drain(&qp->link_work); ntb_qp_link_down_reset(qp); if (qp->event_handler != NULL) qp->event_handler(qp->cb_data, NTB_LINK_DOWN); } /* Link commanded down */ /** * ntb_transport_link_down - Notify NTB transport to no longer enqueue data * @qp: NTB transport layer queue to be disabled * * Notify NTB transport layer of client's desire to no longer receive data on * transport queue specified. It is the client's responsibility to ensure all * entries on queue are purged or otherwise handled appropriately. 
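 *
 * As an illustrative sketch only (the client driver and its fields are
 * invented here, not part of this API), a client typically quiesces its
 * transmit path and reclaims in-flight entries before commanding the
 * link down:
 *
 *	static void
 *	myclient_stop(struct myclient_softc *sc)
 *	{
 *		sc->running = false;		-- stop posting new buffers
 *		myclient_reclaim_entries(sc);	-- drain completions
 *		ntb_transport_link_down(sc->qp);
 *	}
 *
 * Freeing the queue itself is a separate, later step.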
*/ void ntb_transport_link_down(struct ntb_transport_qp *qp) { struct ntb_transport_ctx *nt = qp->transport; int i; uint32_t val; qp->client_ready = false; for (i = 0, val = 0; i < nt->qp_count; i++) { if (nt->qp_vec[i].client_ready) val |= (1 << i); } ntb_peer_spad_write(qp->dev, NTBT_QP_LINKS, val); if (qp->link_is_up) ntb_send_link_down(qp); else callout_drain(&qp->link_work); } /** * ntb_transport_link_query - Query transport link state * @qp: NTB transport layer queue to be queried * * Query connectivity to the remote system of the NTB transport queue * * RETURNS: true for link up or false for link down */ bool ntb_transport_link_query(struct ntb_transport_qp *qp) { return (qp->link_is_up); } /** * ntb_transport_link_speed - Query transport link speed * @qp: NTB transport layer queue to be queried * * Query connection speed to the remote system of the NTB transport queue * * RETURNS: link speed in bits per second */ uint64_t ntb_transport_link_speed(struct ntb_transport_qp *qp) { struct ntb_transport_ctx *nt = qp->transport; uint64_t rate; if (!nt->link_is_up) return (0); switch (nt->link_speed) { case NTB_SPEED_GEN1: rate = 2500000000 * 8 / 10; break; case NTB_SPEED_GEN2: rate = 5000000000 * 8 / 10; break; case NTB_SPEED_GEN3: rate = 8000000000 * 128 / 130; break; case NTB_SPEED_GEN4: rate = 16000000000 * 128 / 130; break; default: return (0); } if (nt->link_width <= 0) return (0); return (rate * nt->link_width); } static void ntb_send_link_down(struct ntb_transport_qp *qp) { struct ntb_queue_entry *entry; int i, rc; if (!qp->link_is_up) return; for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) { entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); if (entry != NULL) break; pause("NTB Wait for link down", hz / 10); } if (entry == NULL) return; entry->cb_data = NULL; entry->buf = NULL; entry->len = 0; entry->flags = NTBT_LINK_DOWN_FLAG; mtx_lock(&qp->tx_lock); rc = ntb_process_tx(qp, entry); mtx_unlock(&qp->tx_lock); if (rc != 0) printf("ntb: Failed to send link down\n"); ntb_qp_link_down_reset(qp); } /* List Management */ static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, struct ntb_queue_list *list) { mtx_lock_spin(lock); STAILQ_INSERT_TAIL(list, entry, entry); mtx_unlock_spin(lock); } static struct ntb_queue_entry * ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list) { struct ntb_queue_entry *entry; mtx_lock_spin(lock); if (STAILQ_EMPTY(list)) { entry = NULL; goto out; } entry = STAILQ_FIRST(list); STAILQ_REMOVE_HEAD(list, entry); out: mtx_unlock_spin(lock); return (entry); } static struct ntb_queue_entry * ntb_list_mv(struct mtx *lock, struct ntb_queue_list *from, struct ntb_queue_list *to) { struct ntb_queue_entry *entry; mtx_lock_spin(lock); if (STAILQ_EMPTY(from)) { entry = NULL; goto out; } entry = STAILQ_FIRST(from); STAILQ_REMOVE_HEAD(from, entry); STAILQ_INSERT_TAIL(to, entry, entry); out: mtx_unlock_spin(lock); return (entry); } /** * ntb_transport_qp_num - Query the qp number * @qp: NTB transport layer queue to be queried * * Query qp number of the NTB transport queue * * RETURNS: a zero based number specifying the qp number */ unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) { return (qp->qp_num); } /** * ntb_transport_max_size - Query the max payload size of a qp * @qp: NTB transport layer queue to be queried * * Query the maximum payload size permissible on the given qp * * RETURNS: the max payload size of a qp */ unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) { return (qp->tx_max_frame - sizeof(struct 
ntb_payload_header)); } unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp) { unsigned int head = qp->tx_index; unsigned int tail = qp->remote_rx_info->entry; return (tail >= head ? tail - head : qp->tx_max_entry + tail - head); } static device_method_t ntb_transport_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ntb_transport_probe), DEVMETHOD(device_attach, ntb_transport_attach), DEVMETHOD(device_detach, ntb_transport_detach), /* Bus interface */ - DEVMETHOD(bus_child_location_str, ntb_transport_child_location_str), + DEVMETHOD(bus_child_location, ntb_transport_child_location), DEVMETHOD(bus_print_child, ntb_transport_print_child), DEVMETHOD_END }; devclass_t ntb_transport_devclass; static DEFINE_CLASS_0(ntb_transport, ntb_transport_driver, ntb_transport_methods, sizeof(struct ntb_transport_ctx)); DRIVER_MODULE(ntb_transport, ntb_hw, ntb_transport_driver, ntb_transport_devclass, NULL, NULL); MODULE_DEPEND(ntb_transport, ntb, 1, 1, 1); MODULE_VERSION(ntb_transport, 1); diff --git a/sys/dev/nvdimm/nvdimm_acpi.c b/sys/dev/nvdimm/nvdimm_acpi.c index ccb5dd048210..11c93ad29fdb 100644 --- a/sys/dev/nvdimm/nvdimm_acpi.c +++ b/sys/dev/nvdimm/nvdimm_acpi.c @@ -1,285 +1,280 @@ /*- * Copyright (c) 2017 The FreeBSD Foundation * Copyright (c) 2018, 2019 Intel Corporation * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #define _COMPONENT ACPI_OEM ACPI_MODULE_NAME("NVDIMM_ACPI") struct nvdimm_root_dev { SLIST_HEAD(, SPA_mapping) spas; }; static MALLOC_DEFINE(M_NVDIMM_ACPI, "nvdimm_acpi", "NVDIMM ACPI bus memory"); static ACPI_STATUS find_dimm(ACPI_HANDLE handle, UINT32 nesting_level, void *context, void **return_value) { ACPI_DEVICE_INFO *device_info; ACPI_STATUS status; status = AcpiGetObjectInfo(handle, &device_info); if (ACPI_FAILURE(status)) return_ACPI_STATUS(AE_ERROR); if (device_info->Address == (uintptr_t)context) { *(ACPI_HANDLE *)return_value = handle; return_ACPI_STATUS(AE_CTRL_TERMINATE); } return_ACPI_STATUS(AE_OK); } static ACPI_HANDLE get_dimm_acpi_handle(ACPI_HANDLE root_handle, nfit_handle_t adr) { ACPI_HANDLE res; ACPI_STATUS status; res = NULL; status = AcpiWalkNamespace(ACPI_TYPE_DEVICE, root_handle, 1, find_dimm, NULL, (void *)(uintptr_t)adr, &res); if (ACPI_FAILURE(status)) res = NULL; return (res); } static int nvdimm_root_create_devs(device_t dev, ACPI_TABLE_NFIT *nfitbl) { ACPI_HANDLE root_handle, dimm_handle; device_t child; nfit_handle_t *dimm_ids, *dimm; uintptr_t *ivars; int num_dimm_ids; root_handle = acpi_get_handle(dev); acpi_nfit_get_dimm_ids(nfitbl, &dimm_ids, &num_dimm_ids); for (dimm = dimm_ids; dimm < dimm_ids + num_dimm_ids; dimm++) { dimm_handle = get_dimm_acpi_handle(root_handle, *dimm); if (dimm_handle == NULL) continue; child = BUS_ADD_CHILD(dev, 100, "nvdimm", -1); if (child == NULL) { device_printf(dev, "failed to create nvdimm\n"); return (ENXIO); } ivars = mallocarray(NVDIMM_ROOT_IVAR_MAX, sizeof(uintptr_t), M_NVDIMM_ACPI, M_ZERO | M_WAITOK); device_set_ivars(child, ivars); nvdimm_root_set_acpi_handle(child, dimm_handle); nvdimm_root_set_device_handle(child, *dimm); } free(dimm_ids, M_NVDIMM_ACPI); return (0); } static int nvdimm_root_create_spas(struct nvdimm_root_dev *dev, ACPI_TABLE_NFIT *nfitbl) { ACPI_NFIT_SYSTEM_ADDRESS **spas, **spa; struct SPA_mapping *spa_mapping; enum SPA_mapping_type spa_type; int error, num_spas; error = 0; acpi_nfit_get_spa_ranges(nfitbl, &spas, &num_spas); for (spa = spas; spa < spas + num_spas; spa++) { spa_type = nvdimm_spa_type_from_uuid( (struct uuid *)(*spa)->RangeGuid); if (spa_type == SPA_TYPE_UNKNOWN) continue; spa_mapping = malloc(sizeof(struct SPA_mapping), M_NVDIMM_ACPI, M_WAITOK | M_ZERO); error = nvdimm_spa_init(spa_mapping, *spa, spa_type); if (error != 0) { nvdimm_spa_fini(spa_mapping); free(spa_mapping, M_NVDIMM_ACPI); break; } if (nvdimm_spa_type_user_accessible(spa_type) && spa_type != SPA_TYPE_CONTROL_REGION) nvdimm_create_namespaces(spa_mapping, nfitbl); SLIST_INSERT_HEAD(&dev->spas, spa_mapping, link); } free(spas, M_NVDIMM_ACPI); return (error); } static char *nvdimm_root_id[] = {"ACPI0012", NULL}; static int nvdimm_root_probe(device_t dev) { int rv; if (acpi_disabled("nvdimm")) return (ENXIO); rv = ACPI_ID_PROBE(device_get_parent(dev), dev, nvdimm_root_id, NULL); if (rv <= 0) device_set_desc(dev, "ACPI NVDIMM root device"); return (rv); } static int nvdimm_root_attach(device_t dev) { struct nvdimm_root_dev *root; ACPI_TABLE_NFIT *nfitbl; ACPI_STATUS status; int error; status = AcpiGetTable(ACPI_SIG_NFIT, 1, (ACPI_TABLE_HEADER **)&nfitbl); if (ACPI_FAILURE(status)) { device_printf(dev, "cannot get NFIT\n"); return (ENXIO); } error = nvdimm_root_create_devs(dev, nfitbl); if (error != 0) return 
(error); error = bus_generic_attach(dev); if (error != 0) return (error); root = device_get_softc(dev); error = nvdimm_root_create_spas(root, nfitbl); AcpiPutTable(&nfitbl->Header); return (error); } static int nvdimm_root_detach(device_t dev) { struct nvdimm_root_dev *root; struct SPA_mapping *spa, *next; device_t *children; int i, error, num_children; root = device_get_softc(dev); SLIST_FOREACH_SAFE(spa, &root->spas, link, next) { nvdimm_destroy_namespaces(spa); nvdimm_spa_fini(spa); SLIST_REMOVE_HEAD(&root->spas, link); free(spa, M_NVDIMM_ACPI); } error = bus_generic_detach(dev); if (error != 0) return (error); error = device_get_children(dev, &children, &num_children); if (error != 0) return (error); for (i = 0; i < num_children; i++) free(device_get_ivars(children[i]), M_NVDIMM_ACPI); free(children, M_TEMP); error = device_delete_children(dev); return (error); } static int nvdimm_root_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { if (index < 0 || index >= NVDIMM_ROOT_IVAR_MAX) return (ENOENT); *result = ((uintptr_t *)device_get_ivars(child))[index]; return (0); } static int nvdimm_root_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { if (index < 0 || index >= NVDIMM_ROOT_IVAR_MAX) return (ENOENT); ((uintptr_t *)device_get_ivars(child))[index] = value; return (0); } static int -nvdimm_root_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) +nvdimm_root_child_location(device_t dev, device_t child, struct sbuf *sb) { ACPI_HANDLE handle; - int res; handle = nvdimm_root_get_acpi_handle(child); if (handle != NULL) - res = snprintf(buf, buflen, "handle=%s", acpi_name(handle)); - else - res = snprintf(buf, buflen, ""); + sbuf_printf(sb, "handle=%s", acpi_name(handle)); - if (res >= buflen) - return (EOVERFLOW); return (0); } static device_method_t nvdimm_acpi_methods[] = { DEVMETHOD(device_probe, nvdimm_root_probe), DEVMETHOD(device_attach, nvdimm_root_attach), DEVMETHOD(device_detach, nvdimm_root_detach), DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_read_ivar, nvdimm_root_read_ivar), DEVMETHOD(bus_write_ivar, nvdimm_root_write_ivar), - DEVMETHOD(bus_child_location_str, nvdimm_root_child_location_str), + DEVMETHOD(bus_child_location, nvdimm_root_child_location), DEVMETHOD_END }; static driver_t nvdimm_acpi_driver = { "nvdimm_acpi_root", nvdimm_acpi_methods, sizeof(struct nvdimm_root_dev), }; static devclass_t nvdimm_acpi_root_devclass; DRIVER_MODULE(nvdimm_acpi_root, acpi, nvdimm_acpi_driver, nvdimm_acpi_root_devclass, NULL, NULL); MODULE_DEPEND(nvdimm_acpi_root, acpi, 1, 1, 1); diff --git a/sys/dev/ofw/ofw_bus_subr.h b/sys/dev/ofw/ofw_bus_subr.h index 0452e32001a0..45709c92c1f7 100644 --- a/sys/dev/ofw/ofw_bus_subr.h +++ b/sys/dev/ofw/ofw_bus_subr.h @@ -1,153 +1,153 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005 Marius Strobl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _DEV_OFW_OFW_BUS_SUBR_H_ #define _DEV_OFW_OFW_BUS_SUBR_H_ #include #ifdef INTRNG #include #endif #include #include "ofw_bus_if.h" #define ORIP_NOINT -1 #define ORIR_NOTFOUND 0xffffffff struct ofw_bus_iinfo { uint8_t *opi_imap; uint8_t *opi_imapmsk; int opi_imapsz; pcell_t opi_addrc; }; struct ofw_compat_data { const char *ocd_str; uintptr_t ocd_data; }; #ifdef INTRNG struct intr_map_data_fdt { struct intr_map_data hdr; phandle_t iparent; u_int ncells; pcell_t cells[]; }; #endif #define FDTCOMPAT_PNP_DESCR "Z:compat;P:#;" #define FDTCOMPAT_PNP_INFO(t, busname) \ MODULE_PNP_INFO(FDTCOMPAT_PNP_DESCR, busname, t, t, sizeof(t) / sizeof(t[0])); #define OFWBUS_PNP_INFO(t) FDTCOMPAT_PNP_INFO(t, ofwbus) #define SIMPLEBUS_PNP_INFO(t) FDTCOMPAT_PNP_INFO(t, simplebus) /* Generic implementation of ofw_bus_if.m methods and helper routines */ int ofw_bus_gen_setup_devinfo(struct ofw_bus_devinfo *, phandle_t); void ofw_bus_gen_destroy_devinfo(struct ofw_bus_devinfo *); ofw_bus_get_compat_t ofw_bus_gen_get_compat; ofw_bus_get_model_t ofw_bus_gen_get_model; ofw_bus_get_name_t ofw_bus_gen_get_name; ofw_bus_get_node_t ofw_bus_gen_get_node; ofw_bus_get_type_t ofw_bus_gen_get_type; /* Helper method to report interesting OF properties in pnpinfo */ -bus_child_pnpinfo_str_t ofw_bus_gen_child_pnpinfo_str; +bus_child_pnpinfo_t ofw_bus_gen_child_pnpinfo; /* Routines for processing firmware interrupt maps */ void ofw_bus_setup_iinfo(phandle_t, struct ofw_bus_iinfo *, int); int ofw_bus_lookup_imap(phandle_t, struct ofw_bus_iinfo *, void *, int, void *, int, void *, int, phandle_t *); int ofw_bus_search_intrmap(void *, int, void *, int, void *, int, void *, void *, void *, int, phandle_t *); /* Routines for processing msi maps */ int ofw_bus_msimap(phandle_t, uint16_t, phandle_t *, uint32_t *); /* Routines for parsing device-tree data into resource lists. */ int ofw_bus_reg_to_rl(device_t, phandle_t, pcell_t, pcell_t, struct resource_list *); int ofw_bus_assigned_addresses_to_rl(device_t, phandle_t, pcell_t, pcell_t, struct resource_list *); int ofw_bus_intr_to_rl(device_t, phandle_t, struct resource_list *, int *); int ofw_bus_intr_by_rid(device_t, phandle_t, int, phandle_t *, int *, pcell_t **); /* Helper to get device status property */ const char *ofw_bus_get_status(device_t dev); int ofw_bus_status_okay(device_t dev); int ofw_bus_node_status_okay(phandle_t node); /* Helper to get node's interrupt parent */ phandle_t ofw_bus_find_iparent(phandle_t); /* Helper routine for checking compat prop */ int ofw_bus_is_compatible(device_t, const char *); int ofw_bus_is_compatible_strict(device_t, const char *); int ofw_bus_node_is_compatible(phandle_t, const char *); /* * Helper routine to search a list of compat properties. 
The table is * terminated by an entry with a NULL compat-string pointer; a pointer to that * table entry is returned if none of the compat strings match for the device, * giving you control over the not-found value. Will not return NULL unless the * provided table pointer is NULL. */ const struct ofw_compat_data * ofw_bus_search_compatible(device_t, const struct ofw_compat_data *); /* Helper routine for checking existence of a prop */ int ofw_bus_has_prop(device_t, const char *); /* Helper to search for a child with a given compat prop */ phandle_t ofw_bus_find_compatible(phandle_t, const char *); /* Helper to search for a child with a given name */ phandle_t ofw_bus_find_child(phandle_t, const char *); /* Helper routine to find a device_t child matching a given phandle_t */ device_t ofw_bus_find_child_device_by_phandle(device_t bus, phandle_t node); /* Helper routines for parsing lists */ int ofw_bus_parse_xref_list_alloc(phandle_t node, const char *list_name, const char *cells_name, int idx, phandle_t *producer, int *ncells, pcell_t **cells); int ofw_bus_parse_xref_list_get_length(phandle_t node, const char *list_name, const char *cells_name, int *count); int ofw_bus_find_string_index(phandle_t node, const char *list_name, const char *name, int *idx); int ofw_bus_string_list_to_array(phandle_t node, const char *list_name, const char ***array); #endif /* !_DEV_OFW_OFW_BUS_SUBR_H_ */ diff --git a/sys/dev/ofw/ofw_cpu.c b/sys/dev/ofw/ofw_cpu.c index 293d765e8cbd..5cbec1eb2ab1 100644 --- a/sys/dev/ofw/ofw_cpu.c +++ b/sys/dev/ofw/ofw_cpu.c @@ -1,403 +1,403 @@ /*- * Copyright (C) 2009 Nathan Whitehorn * Copyright (C) 2015 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Andrew Turner * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #ifdef EXT_RESOURCES #include #endif static int ofw_cpulist_probe(device_t); static int ofw_cpulist_attach(device_t); static const struct ofw_bus_devinfo *ofw_cpulist_get_devinfo(device_t dev, device_t child); static MALLOC_DEFINE(M_OFWCPU, "ofwcpu", "OFW CPU device information"); struct ofw_cpulist_softc { pcell_t sc_addr_cells; }; static device_method_t ofw_cpulist_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofw_cpulist_probe), DEVMETHOD(device_attach, ofw_cpulist_attach), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), - DEVMETHOD(bus_child_pnpinfo_str, ofw_bus_gen_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, ofw_cpulist_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; static driver_t ofw_cpulist_driver = { "cpulist", ofw_cpulist_methods, sizeof(struct ofw_cpulist_softc) }; static devclass_t ofw_cpulist_devclass; DRIVER_MODULE(ofw_cpulist, ofwbus, ofw_cpulist_driver, ofw_cpulist_devclass, 0, 0); static int ofw_cpulist_probe(device_t dev) { const char *name; name = ofw_bus_get_name(dev); if (name == NULL || strcmp(name, "cpus") != 0) return (ENXIO); device_set_desc(dev, "Open Firmware CPU Group"); return (0); } static int ofw_cpulist_attach(device_t dev) { struct ofw_cpulist_softc *sc; phandle_t root, child; device_t cdev; struct ofw_bus_devinfo *dinfo; sc = device_get_softc(dev); root = ofw_bus_get_node(dev); sc->sc_addr_cells = 1; OF_getencprop(root, "#address-cells", &sc->sc_addr_cells, sizeof(sc->sc_addr_cells)); for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_OFWCPU, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(dinfo, child) != 0) { free(dinfo, M_OFWCPU); continue; } cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->obd_name); ofw_bus_gen_destroy_devinfo(dinfo); free(dinfo, M_OFWCPU); continue; } device_set_ivars(cdev, dinfo); } return (bus_generic_attach(dev)); } static const struct ofw_bus_devinfo * ofw_cpulist_get_devinfo(device_t dev, device_t child) { return (device_get_ivars(child)); } static int ofw_cpu_probe(device_t); static int ofw_cpu_attach(device_t); static int ofw_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); struct ofw_cpu_softc { struct pcpu *sc_cpu_pcpu; uint32_t sc_nominal_mhz; boolean_t sc_reg_valid; pcell_t sc_reg[2]; }; static device_method_t ofw_cpu_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofw_cpu_probe), DEVMETHOD(device_attach, ofw_cpu_attach), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_read_ivar, ofw_cpu_read_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource,bus_generic_activate_resource), DEVMETHOD_END }; static driver_t ofw_cpu_driver = { "cpu", ofw_cpu_methods, sizeof(struct ofw_cpu_softc) }; static devclass_t 
ofw_cpu_devclass; DRIVER_MODULE(ofw_cpu, cpulist, ofw_cpu_driver, ofw_cpu_devclass, 0, 0); static int ofw_cpu_probe(device_t dev) { const char *type = ofw_bus_get_type(dev); if (type == NULL || strcmp(type, "cpu") != 0) return (ENXIO); device_set_desc(dev, "Open Firmware CPU"); return (0); } static int ofw_cpu_attach(device_t dev) { struct ofw_cpulist_softc *psc; struct ofw_cpu_softc *sc; phandle_t node; pcell_t cell; int rv; #ifdef EXT_RESOURCES clk_t cpuclk; uint64_t freq; #endif sc = device_get_softc(dev); psc = device_get_softc(device_get_parent(dev)); if (nitems(sc->sc_reg) < psc->sc_addr_cells) { if (bootverbose) device_printf(dev, "Too many address cells\n"); return (EINVAL); } node = ofw_bus_get_node(dev); /* Read and validate the reg property for use later */ sc->sc_reg_valid = false; rv = OF_getencprop(node, "reg", sc->sc_reg, sizeof(sc->sc_reg)); if (rv < 0) device_printf(dev, "missing 'reg' property\n"); else if ((rv % 4) != 0) { if (bootverbose) device_printf(dev, "Malformed reg property\n"); } else if ((rv / 4) != psc->sc_addr_cells) { if (bootverbose) device_printf(dev, "Invalid reg size %u\n", rv); } else sc->sc_reg_valid = true; #ifdef __powerpc__ /* * On powerpc, "interrupt-servers" denotes a SMT CPU. Look for any * thread on this CPU, and assign that. */ if (OF_hasprop(node, "ibm,ppc-interrupt-server#s")) { struct cpuref cpuref; cell_t *servers; int i, nservers, rv; if ((nservers = OF_getencprop_alloc(node, "ibm,ppc-interrupt-server#s", (void **)&servers)) < 0) return (ENXIO); nservers /= sizeof(cell_t); for (i = 0; i < nservers; i++) { for (rv = platform_smp_first_cpu(&cpuref); rv == 0; rv = platform_smp_next_cpu(&cpuref)) { if (cpuref.cr_hwref == servers[i]) { sc->sc_cpu_pcpu = pcpu_find(cpuref.cr_cpuid); if (sc->sc_cpu_pcpu == NULL) { OF_prop_free(servers); return (ENXIO); } break; } } if (rv != ENOENT) break; } OF_prop_free(servers); if (sc->sc_cpu_pcpu == NULL) { device_printf(dev, "No CPU found for this device.\n"); return (ENXIO); } } else #endif sc->sc_cpu_pcpu = pcpu_find(device_get_unit(dev)); if (OF_getencprop(node, "clock-frequency", &cell, sizeof(cell)) < 0) { #ifdef EXT_RESOURCES rv = clk_get_by_ofw_index(dev, 0, 0, &cpuclk); if (rv == 0) { rv = clk_get_freq(cpuclk, &freq); if (rv != 0 && bootverbose) device_printf(dev, "Cannot get freq of property clocks\n"); else sc->sc_nominal_mhz = freq / 1000000; } else #endif { if (bootverbose) device_printf(dev, "missing 'clock-frequency' property\n"); } } else sc->sc_nominal_mhz = cell / 1000000; /* convert to MHz */ if (sc->sc_nominal_mhz != 0 && bootverbose) device_printf(dev, "Nominal frequency %dMhz\n", sc->sc_nominal_mhz); bus_generic_probe(dev); return (bus_generic_attach(dev)); } static int ofw_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct ofw_cpulist_softc *psc; struct ofw_cpu_softc *sc; sc = device_get_softc(dev); switch (index) { case CPU_IVAR_PCPU: *result = (uintptr_t)sc->sc_cpu_pcpu; return (0); case CPU_IVAR_NOMINAL_MHZ: if (sc->sc_nominal_mhz > 0) { *result = (uintptr_t)sc->sc_nominal_mhz; return (0); } break; case CPU_IVAR_CPUID_SIZE: psc = device_get_softc(device_get_parent(dev)); *result = psc->sc_addr_cells; return (0); case CPU_IVAR_CPUID: if (sc->sc_reg_valid) { *result = (uintptr_t)sc->sc_reg; return (0); } break; } return (ENOENT); } int ofw_cpu_early_foreach(ofw_cpu_foreach_cb callback, boolean_t only_runnable) { phandle_t node, child; pcell_t addr_cells, reg[2]; char status[16]; char device_type[16]; u_int id, next_id; int count, rv; count = 0; id = 0; 
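	/*
	 * A usage sketch for this function (the callback and helper names
	 * are hypothetical, for illustration only): platform early-boot
	 * code walks the /cpus node before newbus attach runs, e.g.:
	 *
	 *	static boolean_t
	 *	my_cpu_found(u_int id, phandle_t node, u_int addr_cells,
	 *	    pcell_t *reg)
	 *	{
	 *		-- reg[addr_cells - 1] is the low cell of "reg"
	 *		return (my_register_cpu(id, reg[addr_cells - 1]) == 0);
	 *	}
	 *
	 *	ncpus = ofw_cpu_early_foreach(my_cpu_found, true);
	 *
	 * A true return from the callback counts that CPU; with a NULL
	 * callback every matching CPU is counted. Note the return value
	 * below: the count when only_runnable is true, otherwise the total
	 * number of cpu nodes seen.
	 */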
next_id = 0; node = OF_finddevice("/cpus"); if (node == -1) return (-1); /* Find the number of cells in the cpu register */ if (OF_getencprop(node, "#address-cells", &addr_cells, sizeof(addr_cells)) < 0) return (-1); for (child = OF_child(node); child != 0; child = OF_peer(child), id = next_id) { /* Check if child is a CPU */ memset(device_type, 0, sizeof(device_type)); rv = OF_getprop(child, "device_type", device_type, sizeof(device_type) - 1); if (rv < 0) continue; if (strcmp(device_type, "cpu") != 0) continue; /* We're processing CPU, update next_id used in the next iteration */ next_id++; /* * If we are filtering by runnable then limit to only * those that have been enabled, or do provide a method * to enable them. */ if (only_runnable) { status[0] = '\0'; OF_getprop(child, "status", status, sizeof(status)); if (status[0] != '\0' && strcmp(status, "okay") != 0 && strcmp(status, "ok") != 0 && !OF_hasprop(child, "enable-method")) continue; } /* * Check we have a register to identify the cpu */ rv = OF_getencprop(child, "reg", reg, addr_cells * sizeof(cell_t)); if (rv != addr_cells * sizeof(cell_t)) continue; if (callback == NULL || callback(id, child, addr_cells, reg)) count++; } return (only_runnable ? count : id); } diff --git a/sys/dev/ow/ow.c b/sys/dev/ow/ow.c index a6583accaa59..c39f559c5699 100644 --- a/sys/dev/ow/ow.c +++ b/sys/dev/ow/ow.c @@ -1,746 +1,732 @@ /*- * Copyright (c) 2015 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include /* * lldev - link level device * ndev - network / transport device (this module) * pdev - presentation device (children of this module) */ typedef int ow_enum_fn(device_t, device_t); typedef int ow_found_fn(device_t, romid_t); struct ow_softc { device_t dev; /* Newbus driver back pointer */ struct mtx mtx; /* bus mutex */ device_t owner; /* bus owner, if != NULL */ }; struct ow_devinfo { romid_t romid; }; static int ow_acquire_bus(device_t ndev, device_t pdev, int how); static void ow_release_bus(device_t ndev, device_t pdev); #define OW_LOCK(_sc) mtx_lock(&(_sc)->mtx) #define OW_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define OW_LOCK_DESTROY(_sc) mtx_destroy(&_sc->mtx) #define OW_ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED) #define OW_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->mtx, MA_NOTOWNED) static MALLOC_DEFINE(M_OW, "ow", "House keeping data for 1wire bus"); static const struct ow_timing timing_regular_min = { .t_slot = 60, .t_low0 = 60, .t_low1 = 1, .t_release = 0, .t_rec = 1, .t_rdv = 15, /* fixed */ .t_rstl = 480, .t_rsth = 480, .t_pdl = 60, .t_pdh = 15, .t_lowr = 1, }; static const struct ow_timing timing_regular_max = { .t_slot = 120, .t_low0 = 120, .t_low1 = 15, .t_release = 45, .t_rec = 960, /* infinity */ .t_rdv = 15, /* fixed */ .t_rstl = 960, /* infinity */ .t_rsth = 960, /* infinity */ .t_pdl = 240, /* 60us to 240us */ .t_pdh = 60, /* 15us to 60us */ .t_lowr = 15, /* 1us */ }; static struct ow_timing timing_regular = { .t_slot = 60, /* 60 <= t < 120 */ .t_low0 = 60, /* 60 <= t < t_slot < 120 */ .t_low1 = 1, /* 1 <= t < 15 */ .t_release = 45, /* 0 <= t < 45 */ .t_rec = 15, /* 1 <= t < inf */ .t_rdv = 15, /* t == 15 */ .t_rstl = 480, /* 480 <= t < inf */ .t_rsth = 480, /* 480 <= t < inf */ .t_pdl = 60, /* 60 <= t < 240 */ .t_pdh = 60, /* 15 <= t < 60 */ .t_lowr = 1, /* 1 <= t < 15 */ }; /* NB: Untested */ static const struct ow_timing timing_overdrive_min = { .t_slot = 6, .t_low0 = 6, .t_low1 = 1, .t_release = 0, .t_rec = 1, .t_rdv = 2, /* fixed */ .t_rstl = 48, .t_rsth = 48, .t_pdl = 8, .t_pdh = 2, .t_lowr = 1, }; static const struct ow_timing timing_overdrive_max = { .t_slot = 16, .t_low0 = 16, .t_low1 = 2, .t_release = 4, .t_rec = 960, /* infinity */ .t_rdv = 2, /* fixed */ .t_rstl = 80, .t_rsth = 960, /* infinity */ .t_pdl = 24, .t_pdh = 6, .t_lowr = 2, }; static struct ow_timing timing_overdrive = { .t_slot = 11, /* 6 <= t < 16 */ .t_low0 = 6, /* 6 <= t < t_slot < 16 */ .t_low1 = 1, /* 1 <= t < 2 */ .t_release = 4, /* 0 <= t < 4 */ .t_rec = 1, /* 1 <= t < inf */ .t_rdv = 2, /* t == 2 */ .t_rstl = 48, /* 48 <= t < 80 */ .t_rsth = 48, /* 48 <= t < inf */ .t_pdl = 8, /* 8 <= t < 24 */ .t_pdh = 2, /* 2 <= t < 6 */ .t_lowr = 1, /* 1 <= t < 2 */ }; SYSCTL_NODE(_hw, OID_AUTO, ow, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "1-Wire protocol"); SYSCTL_NODE(_hw_ow, OID_AUTO, regular, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Regular mode timings"); SYSCTL_NODE(_hw_ow, OID_AUTO, overdrive, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Overdrive mode timings"); #define _OW_TIMING_SYSCTL(mode, param) \ static int \ sysctl_ow_timing_ ## mode ## _ ## param(SYSCTL_HANDLER_ARGS) \ { \ int val = timing_ ## mode.param; \ int err; \ err = sysctl_handle_int(oidp, &val, 0, req); \ if (err != 0 || req->newptr == NULL) \ return (err); \ if (val < timing_ ## mode ## _min.param) \ return (EINVAL); \ else if (val >= timing_ ## mode ## _max.param) \ return (EINVAL); 
\ timing_ ## mode.param = val; \ return (0); \ } \ SYSCTL_PROC(_hw_ow_ ## mode, OID_AUTO, param, \ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 0, sizeof(int), \ sysctl_ow_timing_ ## mode ## _ ## param, "I", \ "1-Wire timing parameter in microseconds (-1 resets to default)") #define OW_TIMING_SYSCTL(param) \ _OW_TIMING_SYSCTL(regular, param); \ _OW_TIMING_SYSCTL(overdrive, param) OW_TIMING_SYSCTL(t_slot); OW_TIMING_SYSCTL(t_low0); OW_TIMING_SYSCTL(t_low1); OW_TIMING_SYSCTL(t_release); OW_TIMING_SYSCTL(t_rec); OW_TIMING_SYSCTL(t_rdv); OW_TIMING_SYSCTL(t_rstl); OW_TIMING_SYSCTL(t_rsth); OW_TIMING_SYSCTL(t_pdl); OW_TIMING_SYSCTL(t_pdh); OW_TIMING_SYSCTL(t_lowr); #undef _OW_TIMING_SYSCTL #undef OW_TIMING_SYSCTL static void ow_send_byte(device_t lldev, struct ow_timing *t, uint8_t byte) { int i; for (i = 0; i < 8; i++) if (byte & (1 << i)) OWLL_WRITE_ONE(lldev, t); else OWLL_WRITE_ZERO(lldev, t); } static void ow_read_byte(device_t lldev, struct ow_timing *t, uint8_t *bytep) { int i; uint8_t byte = 0; int bit; for (i = 0; i < 8; i++) { OWLL_READ_DATA(lldev, t, &bit); byte |= bit << i; } *bytep = byte; } static int ow_send_command(device_t ndev, device_t pdev, struct ow_cmd *cmd) { int present, i, bit, tries; device_t lldev; struct ow_timing *t; lldev = device_get_parent(ndev); /* * Retry the reset a couple of times before giving up. */ tries = 4; do { OWLL_RESET_AND_PRESENCE(lldev, &timing_regular, &present); if (present == 1) device_printf(ndev, "Reset said no device on bus?\n"); } while (present == 1 && tries-- > 0); if (present == 1) { device_printf(ndev, "Reset said the device wasn't there.\n"); return ENOENT; /* No devices acked the RESET */ } if (present == -1) { device_printf(ndev, "Reset discovered bus wired wrong.\n"); return ENOENT; } for (i = 0; i < cmd->rom_len; i++) ow_send_byte(lldev, &timing_regular, cmd->rom_cmd[i]); for (i = 0; i < cmd->rom_read_len; i++) ow_read_byte(lldev, &timing_regular, cmd->rom_read + i); if (cmd->xpt_len) { /* * Per AN937, the reset pulse and ROM level are always * done with the regular timings. Certain ROM commands * put the device into overdrive mode for the remainder * of the data transfer, which is why we have to pass the * timings here. Commands that need to be handled like this * are expected to be flagged by the client. */ t = (cmd->flags & OW_FLAG_OVERDRIVE) ?
&timing_overdrive : &timing_regular; for (i = 0; i < cmd->xpt_len; i++) ow_send_byte(lldev, t, cmd->xpt_cmd[i]); if (cmd->flags & OW_FLAG_READ_BIT) { memset(cmd->xpt_read, 0, (cmd->xpt_read_len + 7) / 8); for (i = 0; i < cmd->xpt_read_len; i++) { OWLL_READ_DATA(lldev, t, &bit); cmd->xpt_read[i / 8] |= bit << (i % 8); } } else { for (i = 0; i < cmd->xpt_read_len; i++) ow_read_byte(lldev, t, cmd->xpt_read + i); } } return 0; } static int ow_search_rom(device_t lldev, device_t dev) { struct ow_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.rom_cmd[0] = SEARCH_ROM; cmd.rom_len = 1; return ow_send_command(lldev, dev, &cmd); } #if 0 static int ow_alarm_search(device_t lldev, device_t dev) { struct ow_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.rom_cmd[0] = ALARM_SEARCH; cmd.rom_len = 1; return ow_send_command(lldev, dev, &cmd); } #endif static int ow_add_child(device_t dev, romid_t romid) { struct ow_devinfo *di; device_t child; di = malloc(sizeof(*di), M_OW, M_WAITOK); di->romid = romid; child = device_add_child(dev, NULL, -1); if (child == NULL) { free(di, M_OW); return ENOMEM; } device_set_ivars(child, di); return (0); } static device_t ow_child_by_romid(device_t dev, romid_t romid) { device_t *children, retval, child; int nkid, i; struct ow_devinfo *di; if (device_get_children(dev, &children, &nkid) != 0) return (NULL); retval = NULL; for (i = 0; i < nkid; i++) { child = children[i]; di = device_get_ivars(child); if (di->romid == romid) { retval = child; break; } } free(children, M_TEMP); return (retval); } /* * CRC generator table -- taken from AN937 DOW CRC LOOKUP FUNCTION Table 2 */ const uint8_t ow_crc_table[] = { 0, 94, 188, 226, 97, 63, 221, 131, 194, 156, 126, 32, 163, 253, 31, 65, 157, 195, 33, 127, 252, 162, 64, 30, 95, 1, 227, 189, 62, 96, 130, 220, 35, 125, 159, 193, 66, 28, 254, 160, 225, 191, 93, 3, 128, 222, 60, 98, 190, 224, 2, 92, 223, 129, 99, 61, 124, 34, 192, 158, 29, 67, 161, 255, 70, 24, 250, 164, 39, 121, 155, 197, 132, 218, 56, 102, 229, 187, 89, 7, 219, 133, 103, 57, 186, 228, 6, 88, 25, 71, 165, 251, 120, 38, 196, 154, 101, 59, 217, 135, 4, 90, 184, 230, 167, 249, 27, 69, 198, 152, 122, 36, 248, 166, 68, 26, 153, 199, 37, 123, 58, 100, 134, 216, 91, 5, 231, 185, 140, 210, 48, 110, 237, 179, 81, 15, 78, 16, 242, 172, 47, 113, 147, 205, 17, 79, 173, 243, 112, 46, 204, 146, 211, 141, 111, 49, 178, 236, 14, 80, 175, 241, 19, 77, 206, 144, 114, 44, 109, 51, 209, 143, 12, 82, 176, 238, 50, 108, 142, 208, 83, 13, 239, 177, 240, 174, 76, 18, 145, 207, 45, 115, 202, 148, 118, 40, 171, 245, 23, 73, 8, 86, 180, 234, 105, 55, 213, 139, 87, 9, 235, 181, 54, 104, 138, 212, 149, 203, 41, 119, 244, 170, 72, 22, 233, 183, 85, 11, 136, 214, 52, 106, 43, 117, 151, 201, 74, 20, 246, 168, 116, 42, 200, 150, 21, 75, 169, 247, 182, 232, 10, 84, 215, 137, 107, 53 }; /* * Converted from DO_CRC page 131 of AN937 */ static uint8_t ow_crc(device_t ndev, device_t pdev, uint8_t *buffer, size_t len) { uint8_t crc = 0; int i; for (i = 0; i < len; i++) crc = ow_crc_table[crc ^ buffer[i]]; return crc; } static int ow_check_crc(romid_t romid) { return ow_crc(NULL, NULL, (uint8_t *)&romid, sizeof(romid)) == 0; } static int ow_device_found(device_t dev, romid_t romid) { /* XXX Move this up into enumerate? */ /* * All valid ROM IDs have a valid CRC. Check that first. */ if (!ow_check_crc(romid)) { device_printf(dev, "Device romid %8D failed CRC.\n", &romid, ":"); return EINVAL; } /* * If we've seen this child before, don't add a new one for it.
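 *
 * An aside on the ow_check_crc() convention above (a reasoning note, no
 * new behavior): the last byte of the 8-byte ROM ID is the CRC of the
 * first seven. Because ow_crc_table[] is a permutation of 0..255 with
 * ow_crc_table[0] == 0, folding a correct CRC byte into the CRC of the
 * first seven bytes lands on zero, so
 *
 *	ow_crc(NULL, NULL, p, 8) == 0
 *
 * (with p the ROM ID in wire byte order, as ow_check_crc() assumes) is
 * equivalent to recomputing the CRC over p[0..6] and comparing it to
 * p[7].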
*/ if (ow_child_by_romid(dev, romid) != NULL) return 0; return ow_add_child(dev, romid); } static int ow_enumerate(device_t dev, ow_enum_fn *enumfp, ow_found_fn *foundfp) { device_t lldev = device_get_parent(dev); int first, second, i, dir, prior, last, err, retries; uint64_t probed, last_mask; int sanity = 10; prior = -1; last_mask = 0; retries = 0; last = -2; err = ow_acquire_bus(dev, dev, OWN_DONTWAIT); if (err != 0) return err; while (last != -1) { if (sanity-- < 0) { printf("Reached the sanity limit\n"); return EIO; } again: probed = 0; last = -1; /* * See AN937 section 5.II.C.3 for the algorithm (though a bit * poorly stated). The search command forces each device to * send ROM ID bits one at a time (first the bit, then the * complement); the master (us) sends back a bit. If the * device's bit doesn't match what we send back, that device * stops sending bits back. So each time through we remember * where we made the last decision (always 0). If there's a * conflict there this time (and there will be in the absence * of a hardware failure) we go with 1. This way, we prune the * devices on the bus and wind up with a unique ROM. We know * we're done when we detect no new conflicts. The same * algorithm is used for devices in alarm state as well. * * In addition, experience has shown that sometimes devices * stop responding in the middle of enumeration, so try this * step again a few times when that happens. It is unclear if * this is due to a noisy electrical environment or some odd * timing issue. */ /* * The enumeration command should be successfully sent; if not, * we have big issues on the bus so punt. Lower layers report * any unusual errors, so we don't need to here. */ err = enumfp(dev, dev); if (err != 0) return (err); for (i = 0; i < 64; i++) { OWLL_READ_DATA(lldev, &timing_regular, &first); OWLL_READ_DATA(lldev, &timing_regular, &second); switch (first | second << 1) { case 0: /* Conflict */ if (i < prior) dir = (last_mask >> i) & 1; else dir = i == prior; if (dir == 0) last = i; break; case 1: /* 1 then 0 -> 1 for all */ dir = 1; break; case 2: /* 0 then 1 -> 0 for all */ dir = 0; break; case 3: /* * No device responded. This is unexpected, but * experience has shown that on some platforms * we miss a timing window, or otherwise have * an issue. Start this step over. Since we've * not updated prior yet, we can just jump to * the top of the loop for a re-do of this step. */ printf("oops, starting over\n"); if (++retries > 5) return (EIO); goto again; default: /* NOTREACHED */ __assert_unreachable(); } if (dir) { OWLL_WRITE_ONE(lldev, &timing_regular); probed |= 1ull << i; } else { OWLL_WRITE_ZERO(lldev, &timing_regular); } } retries = 0; foundfp(dev, probed); last_mask = probed; prior = last; } ow_release_bus(dev, dev); return (0); } static int ow_probe(device_t dev) { device_set_desc(dev, "1 Wire Bus"); return (BUS_PROBE_GENERIC); } static int ow_attach(device_t ndev) { struct ow_softc *sc; /* * Find all the devices on the bus. We don't probe / attach them in the * enumeration phase. We do this because we want to allow the probe / * attach routines of the child drivers to have as full access to the * bus as possible. While we reset things before the next step of the * search (so it would likely be OK to allow access by the clients to * the bus), it is more conservative to find them all, then to do the * attach of the devices. This also allows the child devices to have * more knowledge of the bus.
We also ignore errors from the enumeration * because they might happen after we've found a few devices. */ sc = device_get_softc(ndev); sc->dev = ndev; mtx_init(&sc->mtx, device_get_nameunit(sc->dev), "ow", MTX_DEF); ow_enumerate(ndev, ow_search_rom, ow_device_found); return bus_generic_attach(ndev); } static int ow_detach(device_t ndev) { device_t *children, child; int nkid, i; struct ow_devinfo *di; struct ow_softc *sc; sc = device_get_softc(ndev); /* * detach all the children first. This is blocking until any threads * have stopped, etc. */ bus_generic_detach(ndev); /* * We delete all the children, and free up the ivars */ if (device_get_children(ndev, &children, &nkid) != 0) return ENOMEM; for (i = 0; i < nkid; i++) { child = children[i]; di = device_get_ivars(child); free(di, M_OW); device_delete_child(ndev, child); } free(children, M_TEMP); OW_LOCK_DESTROY(sc); return 0; } -/* - * Not sure this is really needed. I'm having trouble figuring out what - * location means in the context of the one wire bus. - */ -static int -ow_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) -{ - - *buf = '\0'; - return (0); -} - static int -ow_child_pnpinfo_str(device_t dev, device_t child, char *buf, - size_t buflen) +ow_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb) { struct ow_devinfo *di; di = device_get_ivars(child); - snprintf(buf, buflen, "romid=%8D", &di->romid, ":"); + sbuf_printf(sb, "romid=%8D", &di->romid, ":"); return (0); } static int ow_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ow_devinfo *di; romid_t **ptr; di = device_get_ivars(child); switch (which) { case OW_IVAR_FAMILY: *result = di->romid & 0xff; break; case OW_IVAR_ROMID: ptr = (romid_t **)result; *ptr = &di->romid; break; default: return EINVAL; } return 0; } static int ow_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { return EINVAL; } static int ow_print_child(device_t ndev, device_t pdev) { int retval = 0; struct ow_devinfo *di; di = device_get_ivars(pdev); retval += bus_print_child_header(ndev, pdev); retval += printf(" romid %8D", &di->romid, ":"); retval += bus_print_child_footer(ndev, pdev); return retval; } static void ow_probe_nomatch(device_t ndev, device_t pdev) { struct ow_devinfo *di; di = device_get_ivars(pdev); device_printf(ndev, "romid %8D: no driver\n", &di->romid, ":"); } static int ow_acquire_bus(device_t ndev, device_t pdev, int how) { struct ow_softc *sc; sc = device_get_softc(ndev); OW_ASSERT_UNLOCKED(sc); OW_LOCK(sc); if (sc->owner != NULL) { if (sc->owner == pdev) panic("%s: %s recursively acquiring the bus.\n", device_get_nameunit(ndev), device_get_nameunit(pdev)); if (how == OWN_DONTWAIT) { OW_UNLOCK(sc); return EWOULDBLOCK; } while (sc->owner != NULL) mtx_sleep(sc, &sc->mtx, 0, "owbuswait", 0); } sc->owner = pdev; OW_UNLOCK(sc); return 0; } static void ow_release_bus(device_t ndev, device_t pdev) { struct ow_softc *sc; sc = device_get_softc(ndev); OW_ASSERT_UNLOCKED(sc); OW_LOCK(sc); if (sc->owner == NULL) panic("%s: %s releasing unowned bus.", device_get_nameunit(ndev), device_get_nameunit(pdev)); if (sc->owner != pdev) panic("%s: %s don't own the bus. %s does. 
game over.", device_get_nameunit(ndev), device_get_nameunit(pdev), device_get_nameunit(sc->owner)); sc->owner = NULL; wakeup(sc); OW_UNLOCK(sc); } devclass_t ow_devclass; static device_method_t ow_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ow_probe), DEVMETHOD(device_attach, ow_attach), DEVMETHOD(device_detach, ow_detach), /* Bus interface */ - DEVMETHOD(bus_child_pnpinfo_str, ow_child_pnpinfo_str), - DEVMETHOD(bus_child_location_str, ow_child_location_str), + DEVMETHOD(bus_child_pnpinfo, ow_child_pnpinfo), DEVMETHOD(bus_read_ivar, ow_read_ivar), DEVMETHOD(bus_write_ivar, ow_write_ivar), DEVMETHOD(bus_print_child, ow_print_child), DEVMETHOD(bus_probe_nomatch, ow_probe_nomatch), /* One Wire Network/Transport layer interface */ DEVMETHOD(own_send_command, ow_send_command), DEVMETHOD(own_acquire_bus, ow_acquire_bus), DEVMETHOD(own_release_bus, ow_release_bus), DEVMETHOD(own_crc, ow_crc), { 0, 0 } }; static driver_t ow_driver = { "ow", ow_methods, sizeof(struct ow_softc), }; DRIVER_MODULE(ow, owc, ow_driver, ow_devclass, 0, 0); MODULE_VERSION(ow, 1); diff --git a/sys/dev/pccard/pccard.c b/sys/dev/pccard/pccard.c index 3bab77864347..8f19eb84725c 100644 --- a/sys/dev/pccard/pccard.c +++ b/sys/dev/pccard/pccard.c @@ -1,1486 +1,1480 @@ /* $NetBSD: pcmcia.c,v 1.23 2000/07/28 19:17:02 drochner Exp $ */ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1997 Marc Horowitz. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Marc Horowitz. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "power_if.h" #include "card_if.h" #define PCCARDDEBUG /* sysctl vars */ static SYSCTL_NODE(_hw, OID_AUTO, pccard, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "PCCARD parameters"); int pccard_debug = 0; SYSCTL_INT(_hw_pccard, OID_AUTO, debug, CTLFLAG_RWTUN, &pccard_debug, 0, "pccard debug"); int pccard_cis_debug = 0; SYSCTL_INT(_hw_pccard, OID_AUTO, cis_debug, CTLFLAG_RWTUN, &pccard_cis_debug, 0, "pccard CIS debug"); #ifdef PCCARDDEBUG #define DPRINTF(arg) if (pccard_debug) printf arg #define DEVPRINTF(arg) if (pccard_debug) device_printf arg #define PRVERBOSE(arg) printf arg #define DEVPRVERBOSE(arg) device_printf arg #else #define DPRINTF(arg) #define DEVPRINTF(arg) #define PRVERBOSE(arg) if (bootverbose) printf arg #define DEVPRVERBOSE(arg) if (bootverbose) device_printf arg #endif static int pccard_ccr_read(struct pccard_function *pf, int ccr); static void pccard_ccr_write(struct pccard_function *pf, int ccr, int val); static int pccard_attach_card(device_t dev); static int pccard_detach_card(device_t dev); static void pccard_function_init(struct pccard_function *pf, int entry); static void pccard_function_free(struct pccard_function *pf); static int pccard_function_enable(struct pccard_function *pf); static void pccard_function_disable(struct pccard_function *pf); static int pccard_probe(device_t dev); static int pccard_attach(device_t dev); static int pccard_detach(device_t dev); static void pccard_print_resources(struct resource_list *rl, const char *name, int type, int count, const char *format); static int pccard_print_child(device_t dev, device_t child); static int pccard_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count); static int pccard_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp); static void pccard_delete_resource(device_t dev, device_t child, int type, int rid); static int pccard_set_res_flags(device_t dev, device_t child, int type, int rid, u_long flags); static int pccard_set_memory_offset(device_t dev, device_t child, int rid, uint32_t offset, uint32_t *deltap); static int pccard_probe_and_attach_child(device_t dev, device_t child, struct pccard_function *pf); static void pccard_probe_nomatch(device_t cbdev, device_t child); static int pccard_read_ivar(device_t bus, device_t child, int which, uintptr_t *result); static void pccard_driver_added(device_t dev, driver_t *driver); static struct resource *pccard_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int pccard_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); static void pccard_child_detached(device_t parent, device_t dev); static int pccard_filter(void *arg); static void pccard_intr(void *arg); static int pccard_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep); static int pccard_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookie); static const struct pccard_product * pccard_do_product_lookup(device_t bus, device_t dev, const struct pccard_product *tab, size_t ent_size, pccard_product_match_fn matchfn); static int pccard_ccr_read(struct pccard_function *pf, int ccr) { 
return (bus_space_read_1(pf->pf_ccrt, pf->pf_ccrh, pf->pf_ccr_offset + ccr)); } static void pccard_ccr_write(struct pccard_function *pf, int ccr, int val) { if ((pf->ccr_mask) & (1 << (ccr / 2))) { bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh, pf->pf_ccr_offset + ccr, val); } } static int pccard_set_default_descr(device_t dev) { const char *vendorstr, *prodstr; uint32_t vendor, prod; char *str; if (pccard_get_vendor_str(dev, &vendorstr)) return (0); if (pccard_get_product_str(dev, &prodstr)) return (0); if (vendorstr != NULL && prodstr != NULL) { str = malloc(strlen(vendorstr) + strlen(prodstr) + 2, M_DEVBUF, M_WAITOK); sprintf(str, "%s %s", vendorstr, prodstr); device_set_desc_copy(dev, str); free(str, M_DEVBUF); } else { if (pccard_get_vendor(dev, &vendor)) return (0); if (pccard_get_product(dev, &prod)) return (0); str = malloc(100, M_DEVBUF, M_WAITOK); snprintf(str, 100, "vendor=%#x product=%#x", vendor, prod); device_set_desc_copy(dev, str); free(str, M_DEVBUF); } return (0); } static int pccard_attach_card(device_t dev) { struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_function *pf; struct pccard_ivar *ivar; device_t child; int i; if (!STAILQ_EMPTY(&sc->card.pf_head)) { if (bootverbose || pccard_debug) device_printf(dev, "Card already inserted.\n"); } DEVPRINTF((dev, "chip_socket_enable\n")); POWER_ENABLE_SOCKET(device_get_parent(dev), dev); DEVPRINTF((dev, "read_cis\n")); pccard_read_cis(sc); DEVPRINTF((dev, "check_cis_quirks\n")); pccard_check_cis_quirks(dev); /* * bail now if the card has no functions, or if there was an error in * the cis. */ if (sc->card.error) { device_printf(dev, "CARD ERROR!\n"); return (1); } if (STAILQ_EMPTY(&sc->card.pf_head)) { device_printf(dev, "Card has no functions!\n"); return (1); } if (bootverbose || pccard_debug) pccard_print_cis(dev); DEVPRINTF((dev, "functions scanning\n")); i = -1; STAILQ_FOREACH(pf, &sc->card.pf_head, pf_list) { i++; if (STAILQ_EMPTY(&pf->cfe_head)) { device_printf(dev, "Function %d has no config entries!\n", i); continue; } pf->sc = sc; pf->cfe = NULL; pf->dev = NULL; } DEVPRINTF((dev, "Card has %d functions. pccard_mfc is %d\n", i + 1, pccard_mfc(sc))); mtx_lock(&Giant); STAILQ_FOREACH(pf, &sc->card.pf_head, pf_list) { if (STAILQ_EMPTY(&pf->cfe_head)) continue; ivar = malloc(sizeof(struct pccard_ivar), M_DEVBUF, M_WAITOK | M_ZERO); resource_list_init(&ivar->resources); child = device_add_child(dev, NULL, -1); device_set_ivars(child, ivar); ivar->pf = pf; pf->dev = child; pccard_probe_and_attach_child(dev, child, pf); } mtx_unlock(&Giant); return (0); } static int pccard_probe_and_attach_child(device_t dev, device_t child, struct pccard_function *pf) { struct pccard_softc *sc = PCCARD_SOFTC(dev); int error; /* * In NetBSD, the drivers are responsible for activating each * function of a card and selecting the config to use. In * FreeBSD, all that's done automatically in the typical lazy * way we do device resource allocation (except we pick the * cfe up front). This is the biggest departure from the * inherited NetBSD model, apart from the FreeBSD resource code. * * This seems to work well in practice for most cards. * However, there are two cases that are problematic. If a * driver wishes to pick and choose which config entry to use, * then this method falls down. These are usually older * cards. In addition, there are some cards that have * multiple hardware units on the card, but present only one * CIS chain. These cards are combination cards, but only one * of these units can be on at a time.
* * To overcome this limitation, while preserving the basic * model, the probe routine can select a cfe and try to * activate it. If that succeeds, we keep track of it and let * that information persist until we attach the card. * Probe routines that do this MUST return 0, and cannot * participate in the bidding process for a device. This * seems harsh until you realize that if a probe routine knows * enough to override the cfe we pick, then chances are very, * very good that it is the only driver that could hope to * cope with the card. Bidding is for generic drivers, and * while some of them may also match, none of them will do * configuration override. */ error = device_probe(child); if (error != 0) goto out; pccard_function_init(pf, -1); if (sc->sc_enabled_count == 0) POWER_ENABLE_SOCKET(device_get_parent(dev), dev); if (pccard_function_enable(pf) == 0 && pccard_set_default_descr(child) == 0 && device_attach(child) == 0) { DEVPRINTF((sc->dev, "function %d CCR at %d offset %#x " "mask %#x: %#x %#x %#x %#x, %#x %#x %#x %#x, %#x\n", pf->number, pf->pf_ccr_window, pf->pf_ccr_offset, pf->ccr_mask, pccard_ccr_read(pf, 0x00), pccard_ccr_read(pf, 0x02), pccard_ccr_read(pf, 0x04), pccard_ccr_read(pf, 0x06), pccard_ccr_read(pf, 0x0A), pccard_ccr_read(pf, 0x0C), pccard_ccr_read(pf, 0x0E), pccard_ccr_read(pf, 0x10), pccard_ccr_read(pf, 0x12))); return (0); } error = ENXIO; out:; /* * The probe may fail and yet have selected a cfe; if so, free it * (this is how a cfe override is unwound). The attach may fail as * well. Either way, we have to clean up. */ if (pf->cfe != NULL) pccard_function_disable(pf); pf->cfe = NULL; pccard_function_free(pf); return (error); } static int pccard_detach_card(device_t dev) { struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_function *pf; struct pccard_config_entry *cfe; struct pccard_ivar *devi; int state; /* * We are running either on the PCCARD socket's event thread * or in user context, detaching a device at the user's request.
*/ STAILQ_FOREACH(pf, &sc->card.pf_head, pf_list) { if (pf->dev == NULL) continue; state = device_get_state(pf->dev); if (state == DS_ATTACHED || state == DS_BUSY) device_detach(pf->dev); if (pf->cfe != NULL) pccard_function_disable(pf); pccard_function_free(pf); devi = PCCARD_IVAR(pf->dev); device_delete_child(dev, pf->dev); free(devi, M_DEVBUF); } if (sc->sc_enabled_count == 0) POWER_DISABLE_SOCKET(device_get_parent(dev), dev); while (NULL != (pf = STAILQ_FIRST(&sc->card.pf_head))) { while (NULL != (cfe = STAILQ_FIRST(&pf->cfe_head))) { STAILQ_REMOVE_HEAD(&pf->cfe_head, cfe_list); free(cfe, M_DEVBUF); } STAILQ_REMOVE_HEAD(&sc->card.pf_head, pf_list); free(pf, M_DEVBUF); } STAILQ_INIT(&sc->card.pf_head); return (0); } static const struct pccard_product * pccard_do_product_lookup(device_t bus, device_t dev, const struct pccard_product *tab, size_t ent_size, pccard_product_match_fn matchfn) { const struct pccard_product *ent; int matches; uint32_t vendor; uint32_t prod; const char *vendorstr; const char *prodstr; const char *cis3str; const char *cis4str; #ifdef DIAGNOSTIC if (sizeof *ent > ent_size) panic("pccard_product_lookup: bogus ent_size %jd", (intmax_t) ent_size); #endif if (pccard_get_vendor(dev, &vendor)) return (NULL); if (pccard_get_product(dev, &prod)) return (NULL); if (pccard_get_vendor_str(dev, &vendorstr)) return (NULL); if (pccard_get_product_str(dev, &prodstr)) return (NULL); if (pccard_get_cis3_str(dev, &cis3str)) return (NULL); if (pccard_get_cis4_str(dev, &cis4str)) return (NULL); for (ent = tab; ent->pp_vendor != 0; ent = (const struct pccard_product *) ((const char *) ent + ent_size)) { matches = 1; if (ent->pp_vendor == PCCARD_VENDOR_ANY && ent->pp_product == PCCARD_PRODUCT_ANY && ent->pp_cis[0] == NULL && ent->pp_cis[1] == NULL) { if (ent->pp_name) device_printf(dev, "Total wildcard entry ignored for %s\n", ent->pp_name); continue; } if (matches && ent->pp_vendor != PCCARD_VENDOR_ANY && vendor != ent->pp_vendor) matches = 0; if (matches && ent->pp_product != PCCARD_PRODUCT_ANY && prod != ent->pp_product) matches = 0; if (matches && ent->pp_cis[0] && (vendorstr == NULL || strcmp(ent->pp_cis[0], vendorstr) != 0)) matches = 0; if (matches && ent->pp_cis[1] && (prodstr == NULL || strcmp(ent->pp_cis[1], prodstr) != 0)) matches = 0; if (matches && ent->pp_cis[2] && (cis3str == NULL || strcmp(ent->pp_cis[2], cis3str) != 0)) matches = 0; if (matches && ent->pp_cis[3] && (cis4str == NULL || strcmp(ent->pp_cis[3], cis4str) != 0)) matches = 0; if (matchfn != NULL) matches = (*matchfn)(dev, ent, matches); if (matches) return (ent); } return (NULL); } /** * @brief pccard_select_cfe * * Select a cfe entry to use. Should be called from the pccard's probe * routine after it knows for sure that it wants this card. * * XXX I think we need to make this symbol be static, ala the kobj stuff * we do for everything else. This is a quick hack. */ int pccard_select_cfe(device_t dev, int entry) { struct pccard_ivar *devi = PCCARD_IVAR(dev); struct pccard_function *pf = devi->pf; pccard_function_init(pf, entry); return (pf->cfe ? 0 : ENOMEM); } /* * Initialize a PCCARD function. May be called as long as the function is * disabled. * * Note: pccard_function_init should not keep resources allocated. It should * only set them up ala isa pnp, set the values in the rl lists, and return. * Any resource held after pccard_function_init is called is a bug. However, * the bus routines to get the resources also assume that pccard_function_init * does this, so they need to be fixed too. 
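 *
 * For illustration only: a hypothetical client probe (driver name
 * made up) that pins config entry 1 via pccard_select_cfe() and,
 * per the rules above, returns 0 so that it does not bid:
 *
 *	static int
 *	foo_pccard_probe(device_t dev)
 *	{
 *
 *		if (pccard_select_cfe(dev, 1) != 0)
 *			return (ENXIO);
 *		return (0);
 *	}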
*/ static void pccard_function_init(struct pccard_function *pf, int entry) { struct pccard_config_entry *cfe; struct pccard_ivar *devi = PCCARD_IVAR(pf->dev); struct resource_list *rl = &devi->resources; struct resource_list_entry *rle; struct resource *r = NULL; struct pccard_ce_iospace *ios; struct pccard_ce_memspace *mems; device_t bus; rman_res_t start, end, len; int i, rid, spaces; if (pf->pf_flags & PFF_ENABLED) { printf("pccard_function_init: function is enabled\n"); return; } /* * The driver's probe routine already requested a specific entry * and it succeeded. */ if (pf->cfe != NULL) return; /* * Walk the list of configuration entries until we find one for * which we can allocate all the resources. */ bus = device_get_parent(pf->dev); STAILQ_FOREACH(cfe, &pf->cfe_head, cfe_list) { if (cfe->iftype != PCCARD_IFTYPE_IO) continue; if (entry != -1 && cfe->number != entry) continue; spaces = 0; for (i = 0; i < cfe->num_iospace; i++) { ios = cfe->iospace + i; start = ios->start; if (start) end = start + ios->length - 1; else end = ~0; DEVPRINTF((bus, "I/O rid %d start %#jx end %#jx\n", i, start, end)); rid = i; len = ios->length; r = bus_alloc_resource(bus, SYS_RES_IOPORT, &rid, start, end, len, rman_make_alignment_flags(len)); if (r == NULL) { DEVPRINTF((bus, "I/O rid %d failed\n", i)); goto not_this_one; } rle = resource_list_add(rl, SYS_RES_IOPORT, rid, rman_get_start(r), rman_get_end(r), len); if (rle == NULL) panic("Cannot add resource rid %d IOPORT", rid); rle->res = r; spaces++; } for (i = 0; i < cfe->num_memspace; i++) { mems = cfe->memspace + i; start = mems->cardaddr + mems->hostaddr; if (start) end = start + mems->length - 1; else end = ~0; DEVPRINTF((bus, "Memory rid %d start %#jx end %#jx\ncardaddr %#jx hostaddr %#jx length %#jx\n", i, start, end, mems->cardaddr, mems->hostaddr, mems->length)); rid = i; len = mems->length; r = bus_alloc_resource(bus, SYS_RES_MEMORY, &rid, start, end, len, rman_make_alignment_flags(len)); if (r == NULL) { DEVPRINTF((bus, "Memory rid %d failed\n", i)); // goto not_this_one; continue; } rle = resource_list_add(rl, SYS_RES_MEMORY, rid, rman_get_start(r), rman_get_end(r), len); if (rle == NULL) panic("Cannot add resource rid %d MEM", rid); rle->res = r; spaces++; } if (spaces == 0) { DEVPRINTF((bus, "Neither memory nor I/O mapped\n")); goto not_this_one; } if (cfe->irqmask) { rid = 0; r = bus_alloc_resource_any(bus, SYS_RES_IRQ, &rid, RF_SHAREABLE); if (r == NULL) { DEVPRINTF((bus, "IRQ rid %d failed\n", rid)); goto not_this_one; } rle = resource_list_add(rl, SYS_RES_IRQ, rid, rman_get_start(r), rman_get_end(r), 1); if (rle == NULL) panic("Cannot add resource rid %d IRQ", rid); rle->res = r; } /* If we get to here, we've allocated all we need */ pf->cfe = cfe; break; not_this_one:; DEVPRVERBOSE((bus, "Allocation failed for cfe %d\n", cfe->number)); resource_list_purge(rl); } } /* * Free resources allocated by pccard_function_init(). May be called as long * as the function is disabled. * * NOTE: This function should be unnecessary. pccard_function_init should * never keep resources initialized. */ static void pccard_function_free(struct pccard_function *pf) { struct pccard_ivar *devi = PCCARD_IVAR(pf->dev); struct resource_list_entry *rle; if (pf->pf_flags & PFF_ENABLED) { printf("pccard_function_free: function is enabled\n"); return; } STAILQ_FOREACH(rle, &devi->resources, link) { if (rle->res) { if (rman_get_device(rle->res) != pf->sc->dev) device_printf(pf->sc->dev, "function_free: Resource still owned by " "child, oops. 
" "(type=%d, rid=%d, addr=%#jx)\n", rle->type, rle->rid, rman_get_start(rle->res)); BUS_RELEASE_RESOURCE(device_get_parent(pf->sc->dev), pf->sc->dev, rle->type, rle->rid, rle->res); rle->res = NULL; } } resource_list_free(&devi->resources); } static void pccard_mfc_adjust_iobase(struct pccard_function *pf, rman_res_t addr, rman_res_t offset, rman_res_t size) { bus_size_t iosize, tmp; if (addr != 0) { if (pf->pf_mfc_iomax == 0) { pf->pf_mfc_iobase = addr + offset; pf->pf_mfc_iomax = pf->pf_mfc_iobase + size; } else { /* this makes the assumption that nothing overlaps */ if (pf->pf_mfc_iobase > addr + offset) pf->pf_mfc_iobase = addr + offset; if (pf->pf_mfc_iomax < addr + offset + size) pf->pf_mfc_iomax = addr + offset + size; } } tmp = pf->pf_mfc_iomax - pf->pf_mfc_iobase; /* round up to nearest (2^n)-1 */ for (iosize = 1; iosize < tmp; iosize <<= 1) ; iosize--; DEVPRINTF((pf->dev, "MFC: I/O base %#jx IOSIZE %#jx\n", (uintmax_t)pf->pf_mfc_iobase, (uintmax_t)(iosize + 1))); pccard_ccr_write(pf, PCCARD_CCR_IOBASE0, pf->pf_mfc_iobase & 0xff); pccard_ccr_write(pf, PCCARD_CCR_IOBASE1, (pf->pf_mfc_iobase >> 8) & 0xff); pccard_ccr_write(pf, PCCARD_CCR_IOBASE2, 0); pccard_ccr_write(pf, PCCARD_CCR_IOBASE3, 0); pccard_ccr_write(pf, PCCARD_CCR_IOSIZE, iosize); } /* Enable a PCCARD function */ static int pccard_function_enable(struct pccard_function *pf) { struct pccard_function *tmp; int reg; device_t dev = pf->sc->dev; if (pf->cfe == NULL) { DEVPRVERBOSE((dev, "No config entry could be allocated.\n")); return (ENOMEM); } if (pf->pf_flags & PFF_ENABLED) return (0); pf->sc->sc_enabled_count++; /* * it's possible for different functions' CCRs to be in the same * underlying page. Check for that. */ STAILQ_FOREACH(tmp, &pf->sc->card.pf_head, pf_list) { if ((tmp->pf_flags & PFF_ENABLED) && (pf->ccr_base >= (tmp->ccr_base - tmp->pf_ccr_offset)) && ((pf->ccr_base + PCCARD_CCR_SIZE) <= (tmp->ccr_base - tmp->pf_ccr_offset + tmp->pf_ccr_realsize))) { pf->pf_ccrt = tmp->pf_ccrt; pf->pf_ccrh = tmp->pf_ccrh; pf->pf_ccr_realsize = tmp->pf_ccr_realsize; /* * pf->pf_ccr_offset = (tmp->pf_ccr_offset - * tmp->ccr_base) + pf->ccr_base; */ /* pf->pf_ccr_offset = (tmp->pf_ccr_offset + pf->ccr_base) - tmp->ccr_base; */ pf->pf_ccr_window = tmp->pf_ccr_window; break; } } if (tmp == NULL) { pf->ccr_rid = 0; pf->ccr_res = bus_alloc_resource_anywhere(dev, SYS_RES_MEMORY, &pf->ccr_rid, PCCARD_MEM_PAGE_SIZE, RF_ACTIVE); if (!pf->ccr_res) goto bad; DEVPRINTF((dev, "ccr_res == %#jx-%#jx, base=%#x\n", rman_get_start(pf->ccr_res), rman_get_end(pf->ccr_res), pf->ccr_base)); CARD_SET_RES_FLAGS(device_get_parent(dev), dev, SYS_RES_MEMORY, pf->ccr_rid, PCCARD_A_MEM_ATTR); CARD_SET_MEMORY_OFFSET(device_get_parent(dev), dev, pf->ccr_rid, pf->ccr_base, &pf->pf_ccr_offset); pf->pf_ccrt = rman_get_bustag(pf->ccr_res); pf->pf_ccrh = rman_get_bushandle(pf->ccr_res); pf->pf_ccr_realsize = 1; } reg = (pf->cfe->number & PCCARD_CCR_OPTION_CFINDEX); reg |= PCCARD_CCR_OPTION_LEVIREQ; if (pccard_mfc(pf->sc)) { reg |= (PCCARD_CCR_OPTION_FUNC_ENABLE | PCCARD_CCR_OPTION_ADDR_DECODE); /* PCCARD_CCR_OPTION_IRQ_ENABLE set elsewhere as needed */ } pccard_ccr_write(pf, PCCARD_CCR_OPTION, reg); reg = 0; if ((pf->cfe->flags & PCCARD_CFE_IO16) == 0) reg |= PCCARD_CCR_STATUS_IOIS8; if (pf->cfe->flags & PCCARD_CFE_AUDIO) reg |= PCCARD_CCR_STATUS_AUDIO; pccard_ccr_write(pf, PCCARD_CCR_STATUS, reg); pccard_ccr_write(pf, PCCARD_CCR_SOCKETCOPY, 0); if (pccard_mfc(pf->sc)) pccard_mfc_adjust_iobase(pf, 0, 0, 0); #ifdef PCCARDDEBUG if (pccard_debug) { 
STAILQ_FOREACH(tmp, &pf->sc->card.pf_head, pf_list) { device_printf(tmp->sc->dev, "function %d CCR at %d offset %#x: " "%#x %#x %#x %#x, %#x %#x %#x %#x, %#x\n", tmp->number, tmp->pf_ccr_window, tmp->pf_ccr_offset, pccard_ccr_read(tmp, 0x00), pccard_ccr_read(tmp, 0x02), pccard_ccr_read(tmp, 0x04), pccard_ccr_read(tmp, 0x06), pccard_ccr_read(tmp, 0x0A), pccard_ccr_read(tmp, 0x0C), pccard_ccr_read(tmp, 0x0E), pccard_ccr_read(tmp, 0x10), pccard_ccr_read(tmp, 0x12)); } } #endif pf->pf_flags |= PFF_ENABLED; return (0); bad: /* * Decrement the reference count, and power down the socket, if * necessary. */ pf->sc->sc_enabled_count--; DEVPRINTF((dev, "bad --enabled_count = %d\n", pf->sc->sc_enabled_count)); return (1); } /* Disable PCCARD function. */ static void pccard_function_disable(struct pccard_function *pf) { struct pccard_function *tmp; device_t dev = pf->sc->dev; if (pf->cfe == NULL) panic("pccard_function_disable: function not initialized"); if ((pf->pf_flags & PFF_ENABLED) == 0) return; if (pf->intr_handler != NULL) { struct pccard_ivar *devi = PCCARD_IVAR(pf->dev); struct resource_list_entry *rle = resource_list_find(&devi->resources, SYS_RES_IRQ, 0); if (rle == NULL) panic("Can't disable an interrupt with no IRQ res\n"); BUS_TEARDOWN_INTR(dev, pf->dev, rle->res, pf->intr_handler_cookie); } /* * It's possible for different functions' CCRs to be in the same * underlying page. Check for that. Note that we mark ourselves * disabled first to avoid matching ourselves. */ pf->pf_flags &= ~PFF_ENABLED; STAILQ_FOREACH(tmp, &pf->sc->card.pf_head, pf_list) { if ((tmp->pf_flags & PFF_ENABLED) && (pf->ccr_base >= (tmp->ccr_base - tmp->pf_ccr_offset)) && ((pf->ccr_base + PCCARD_CCR_SIZE) <= (tmp->ccr_base - tmp->pf_ccr_offset + tmp->pf_ccr_realsize))) break; } /* Not used by anyone else; unmap the CCR. */ if (tmp == NULL) { bus_release_resource(dev, SYS_RES_MEMORY, pf->ccr_rid, pf->ccr_res); pf->ccr_res = NULL; } /* * Decrement the reference count, and power down the socket, if * necessary.
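 * (The POWER_DISABLE_SOCKET() call itself is made from
 * pccard_detach_card() once sc_enabled_count has dropped to zero.)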
*/ pf->sc->sc_enabled_count--; } #define PCCARD_NPORT 2 #define PCCARD_NMEM 5 #define PCCARD_NIRQ 1 #define PCCARD_NDRQ 0 static int pccard_probe(device_t dev) { device_set_desc(dev, "16-bit PCCard bus"); return (0); } static int pccard_attach(device_t dev) { struct pccard_softc *sc = PCCARD_SOFTC(dev); int err; sc->dev = dev; sc->sc_enabled_count = 0; if ((err = pccard_device_create(sc)) != 0) return (err); gone_in_dev(dev, 13, "PC Card to be removed."); STAILQ_INIT(&sc->card.pf_head); return (bus_generic_attach(dev)); } static int pccard_detach(device_t dev) { pccard_detach_card(dev); pccard_device_destroy(device_get_softc(dev)); return (0); } static int pccard_suspend(device_t self) { pccard_detach_card(self); return (0); } static int pccard_resume(device_t self) { return (0); } static void pccard_print_resources(struct resource_list *rl, const char *name, int type, int count, const char *format) { struct resource_list_entry *rle; int printed; int i; printed = 0; for (i = 0; i < count; i++) { rle = resource_list_find(rl, type, i); if (rle != NULL) { if (printed == 0) printf(" %s ", name); else if (printed > 0) printf(","); printed++; printf(format, rle->start); if (rle->count > 1) { printf("-"); printf(format, rle->start + rle->count - 1); } } else if (i > 3) { /* check the first few regardless */ break; } } } static int pccard_print_child(device_t dev, device_t child) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct resource_list *rl = &devi->resources; int retval = 0; retval += bus_print_child_header(dev, child); retval += printf(" at"); if (devi != NULL) { pccard_print_resources(rl, "port", SYS_RES_IOPORT, PCCARD_NPORT, "%#lx"); pccard_print_resources(rl, "iomem", SYS_RES_MEMORY, PCCARD_NMEM, "%#lx"); pccard_print_resources(rl, "irq", SYS_RES_IRQ, PCCARD_NIRQ, "%ld"); pccard_print_resources(rl, "drq", SYS_RES_DRQ, PCCARD_NDRQ, "%ld"); retval += printf(" function %d config %d", devi->pf->number, devi->pf->cfe->number); } retval += bus_print_child_footer(dev, child); return (retval); } static int pccard_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct resource_list *rl = &devi->resources; if (type != SYS_RES_IOPORT && type != SYS_RES_MEMORY && type != SYS_RES_IRQ && type != SYS_RES_DRQ) return (EINVAL); if (rid < 0) return (EINVAL); if (type == SYS_RES_IOPORT && rid >= PCCARD_NPORT) return (EINVAL); if (type == SYS_RES_MEMORY && rid >= PCCARD_NMEM) return (EINVAL); if (type == SYS_RES_IRQ && rid >= PCCARD_NIRQ) return (EINVAL); if (type == SYS_RES_DRQ && rid >= PCCARD_NDRQ) return (EINVAL); resource_list_add(rl, type, rid, start, start + count - 1, count); if (NULL != resource_list_alloc(rl, device_get_parent(dev), dev, type, &rid, start, start + count - 1, count, 0)) return 0; else return ENOMEM; } static int pccard_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct resource_list *rl = &devi->resources; struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle == NULL) return (ENOENT); if (startp != NULL) *startp = rle->start; if (countp != NULL) *countp = rle->count; return (0); } static void pccard_delete_resource(device_t dev, device_t child, int type, int rid) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct resource_list *rl = &devi->resources; resource_list_delete(rl, type, rid); } static int pccard_set_res_flags(device_t dev, 
device_t child, int type, int rid, u_long flags) { return (CARD_SET_RES_FLAGS(device_get_parent(dev), child, type, rid, flags)); } static int pccard_set_memory_offset(device_t dev, device_t child, int rid, uint32_t offset, uint32_t *deltap) { return (CARD_SET_MEMORY_OFFSET(device_get_parent(dev), child, rid, offset, deltap)); } static void pccard_probe_nomatch(device_t bus, device_t child) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; struct pccard_softc *sc = PCCARD_SOFTC(bus); int i; device_printf(bus, ""); printf(" (manufacturer=0x%04x, product=0x%04x, function_type=%d) " "at function %d\n", sc->card.manufacturer, sc->card.product, pf->function, pf->number); device_printf(bus, " CIS info: "); for (i = 0; i < 4 && sc->card.cis1_info[i] != NULL; i++) printf("%s%s", i > 0 ? ", " : "", sc->card.cis1_info[i]); printf("\n"); return; } static int -pccard_child_location_str(device_t bus, device_t child, char *buf, - size_t buflen) +pccard_child_location(device_t bus, device_t child, struct sbuf *sb) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; - snprintf(buf, buflen, "function=%d", pf->number); + sbuf_printf(sb, "function=%d", pf->number); return (0); } static int -pccard_child_pnpinfo_str(device_t bus, device_t child, char *buf, - size_t buflen) +pccard_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; struct pccard_softc *sc = PCCARD_SOFTC(bus); - struct sbuf sb; - sbuf_new(&sb, buf, buflen, SBUF_FIXEDLEN | SBUF_INCLUDENUL); - sbuf_printf(&sb, "manufacturer=0x%04x product=0x%04x " + sbuf_printf(sb, "manufacturer=0x%04x product=0x%04x " "cisvendor=\"", sc->card.manufacturer, sc->card.product); - devctl_safe_quote_sb(&sb, sc->card.cis1_info[0]); - sbuf_printf(&sb, "\" cisproduct=\""); - devctl_safe_quote_sb(&sb, sc->card.cis1_info[1]); - sbuf_printf(&sb, "\" function_type=%d", pf->function); - sbuf_finish(&sb); - sbuf_delete(&sb); + devctl_safe_quote_sb(sb, sc->card.cis1_info[0]); + sbuf_printf(sb, "\" cisproduct=\""); + devctl_safe_quote_sb(sb, sc->card.cis1_info[1]); + sbuf_printf(sb, "\" function_type=%d", pf->function); return (0); } static int pccard_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; struct pccard_softc *sc = PCCARD_SOFTC(bus); if (!pf) panic("No pccard function pointer"); switch (which) { default: return (EINVAL); case PCCARD_IVAR_FUNCE_DISK: *(uint16_t *)result = pf->pf_funce_disk_interface | (pf->pf_funce_disk_power << 8); break; case PCCARD_IVAR_ETHADDR: bcopy(pf->pf_funce_lan_nid, result, ETHER_ADDR_LEN); break; case PCCARD_IVAR_VENDOR: *(uint32_t *)result = sc->card.manufacturer; break; case PCCARD_IVAR_PRODUCT: *(uint32_t *)result = sc->card.product; break; case PCCARD_IVAR_PRODEXT: *(uint16_t *)result = sc->card.prodext; break; case PCCARD_IVAR_FUNCTION: *(uint32_t *)result = pf->function; break; case PCCARD_IVAR_FUNCTION_NUMBER: *(uint32_t *)result = pf->number; break; case PCCARD_IVAR_VENDOR_STR: *(const char **)result = sc->card.cis1_info[0]; break; case PCCARD_IVAR_PRODUCT_STR: *(const char **)result = sc->card.cis1_info[1]; break; case PCCARD_IVAR_CIS3_STR: *(const char **)result = sc->card.cis1_info[2]; break; case PCCARD_IVAR_CIS4_STR: *(const char **)result = sc->card.cis1_info[3]; break; } return (0); } static void pccard_driver_added(device_t dev, driver_t *driver) {
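	/*
	 * Give the newly loaded driver a chance at every function that
	 * does not yet have a driver attached (still in DS_NOTPRESENT).
	 */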
struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_function *pf; device_t child; STAILQ_FOREACH(pf, &sc->card.pf_head, pf_list) { if (STAILQ_EMPTY(&pf->cfe_head)) continue; child = pf->dev; if (device_get_state(child) != DS_NOTPRESENT) continue; pccard_probe_and_attach_child(dev, child, pf); } return; } static struct resource * pccard_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pccard_ivar *dinfo; struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != dev); int isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); struct resource *r = NULL; /* XXX I'm no longer sure this is right */ if (passthrough) { return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); } dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, type, *rid); if (rle == NULL && isdefault) return (NULL); /* no resource of that type/rid */ if (rle == NULL || rle->res == NULL) { /* XXX Need to adjust flags */ r = bus_alloc_resource(dev, type, rid, start, end, count, flags); if (r == NULL) goto bad; resource_list_add(&dinfo->resources, type, *rid, rman_get_start(r), rman_get_end(r), count); rle = resource_list_find(&dinfo->resources, type, *rid); if (!rle) goto bad; rle->res = r; } /* * If dev doesn't own the device, then we can't give this device * out. */ if (rman_get_device(rle->res) != dev) return (NULL); rman_set_device(rle->res, child); if (flags & RF_ACTIVE) BUS_ACTIVATE_RESOURCE(dev, child, type, *rid, rle->res); return (rle->res); bad:; device_printf(dev, "WARNING: Resource not reserved by pccard\n"); return (NULL); } static int pccard_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pccard_ivar *dinfo; int passthrough = (device_get_parent(child) != dev); struct resource_list_entry *rle = NULL; if (passthrough) return BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r); dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, type, rid); if (!rle) { device_printf(dev, "Allocated resource not found, " "%d %#x %#jx %#jx\n", type, rid, rman_get_start(r), rman_get_size(r)); return ENOENT; } if (!rle->res) { device_printf(dev, "Allocated resource not recorded\n"); return ENOENT; } /* * Deactivate the resource (since it is being released), and * assign it to the bus. */ BUS_DEACTIVATE_RESOURCE(dev, child, type, rid, rle->res); rman_set_device(rle->res, dev); return (0); } static void pccard_child_detached(device_t parent, device_t dev) { struct pccard_ivar *ivar = PCCARD_IVAR(dev); struct pccard_function *pf = ivar->pf; pccard_function_disable(pf); } static int pccard_filter(void *arg) { struct pccard_function *pf = (struct pccard_function*) arg; int reg; int doisr = 1; /* * MFC cards know if they interrupted, so we have to ack the * interrupt and call the ISR. Non-MFC cards don't have these * bits, so they always get called. Many non-MFC cards have * this bit set always upon read, but some do not. * * We always ack the interrupt, even if there's no ISR * for the card. This is done on the theory that acking * the interrupt will pacify the card enough to keep an * interrupt storm from happening. Of course this won't * help in the non-MFC case. * * This has no impact for MPSAFEness of the client drivers. * We register this with whatever flags the intr_handler * was registered with. All these functions are MPSAFE. 
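 *
 * Returning FILTER_STRAY when an MFC card shows no interrupt pending
 * lets the shared-interrupt code poll the other handlers on the line
 * instead of scheduling ours.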
*/ if (pccard_mfc(pf->sc)) { reg = pccard_ccr_read(pf, PCCARD_CCR_STATUS); if (reg & PCCARD_CCR_STATUS_INTR) pccard_ccr_write(pf, PCCARD_CCR_STATUS, reg & ~PCCARD_CCR_STATUS_INTR); else doisr = 0; } if (doisr) { if (pf->intr_filter != NULL) return (pf->intr_filter(pf->intr_handler_arg)); return (FILTER_SCHEDULE_THREAD); } return (FILTER_STRAY); } static void pccard_intr(void *arg) { struct pccard_function *pf = (struct pccard_function*) arg; pf->intr_handler(pf->intr_handler_arg); } static int pccard_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_ivar *ivar = PCCARD_IVAR(child); struct pccard_function *pf = ivar->pf; int err; if (pf->intr_filter != NULL || pf->intr_handler != NULL) panic("Only one interrupt handler per function allowed"); pf->intr_filter = filt; pf->intr_handler = intr; pf->intr_handler_arg = arg; err = bus_generic_setup_intr(dev, child, irq, flags, pccard_filter, intr ? pccard_intr : NULL, pf, cookiep); if (err != 0) { pf->intr_filter = NULL; pf->intr_handler = NULL; return (err); } pf->intr_handler_cookie = *cookiep; if (pccard_mfc(sc)) { pccard_ccr_write(pf, PCCARD_CCR_OPTION, pccard_ccr_read(pf, PCCARD_CCR_OPTION) | PCCARD_CCR_OPTION_IREQ_ENABLE); } return (0); } static int pccard_teardown_intr(device_t dev, device_t child, struct resource *r, void *cookie) { struct pccard_softc *sc = PCCARD_SOFTC(dev); struct pccard_ivar *ivar = PCCARD_IVAR(child); struct pccard_function *pf = ivar->pf; int ret; if (pccard_mfc(sc)) { pccard_ccr_write(pf, PCCARD_CCR_OPTION, pccard_ccr_read(pf, PCCARD_CCR_OPTION) & ~PCCARD_CCR_OPTION_IREQ_ENABLE); } ret = bus_generic_teardown_intr(dev, child, r, cookie); if (ret == 0) { pf->intr_handler = NULL; pf->intr_handler_arg = NULL; pf->intr_handler_cookie = NULL; } return (ret); } static int pccard_activate_resource(device_t brdev, device_t child, int type, int rid, struct resource *r) { struct pccard_ivar *ivar = PCCARD_IVAR(child); struct pccard_function *pf = ivar->pf; switch(type) { case SYS_RES_IOPORT: /* * We need to adjust IOBASE[01] and IOSIZE if we're an MFC * card. */ if (pccard_mfc(pf->sc)) pccard_mfc_adjust_iobase(pf, rman_get_start(r), 0, rman_get_size(r)); break; default: break; } return (bus_generic_activate_resource(brdev, child, type, rid, r)); } static int pccard_deactivate_resource(device_t brdev, device_t child, int type, int rid, struct resource *r) { /* XXX undo pccard_activate_resource? XXX */ return (bus_generic_deactivate_resource(brdev, child, type, rid, r)); } static int pccard_attr_read_impl(device_t brdev, device_t child, uint32_t offset, uint8_t *val) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; /* * Optimization. Most of the time, devices want to access * the same page of the attribute memory that the CCR is in. * We take advantage of this fact here. 
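	 * When the requested offset falls outside that page, we briefly
	 * retarget the window with CARD_SET_MEMORY_OFFSET() and restore
	 * it to ccr_base afterwards.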
*/ if (offset / PCCARD_MEM_PAGE_SIZE == pf->ccr_base / PCCARD_MEM_PAGE_SIZE) *val = bus_space_read_1(pf->pf_ccrt, pf->pf_ccrh, offset % PCCARD_MEM_PAGE_SIZE); else { CARD_SET_MEMORY_OFFSET(brdev, child, pf->ccr_rid, offset, &offset); *val = bus_space_read_1(pf->pf_ccrt, pf->pf_ccrh, offset); CARD_SET_MEMORY_OFFSET(brdev, child, pf->ccr_rid, pf->ccr_base, &offset); } return 0; } static int pccard_attr_write_impl(device_t brdev, device_t child, uint32_t offset, uint8_t val) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; /* * Optimization. Most of the time, devices want to access * the same page of the attribute memory that the CCR is in. * We take advantage of this fact here. */ if (offset / PCCARD_MEM_PAGE_SIZE == pf->ccr_base / PCCARD_MEM_PAGE_SIZE) bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh, offset % PCCARD_MEM_PAGE_SIZE, val); else { CARD_SET_MEMORY_OFFSET(brdev, child, pf->ccr_rid, offset, &offset); bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh, offset, val); CARD_SET_MEMORY_OFFSET(brdev, child, pf->ccr_rid, pf->ccr_base, &offset); } return 0; } static int pccard_ccr_read_impl(device_t brdev, device_t child, uint32_t offset, uint8_t *val) { struct pccard_ivar *devi = PCCARD_IVAR(child); *val = pccard_ccr_read(devi->pf, offset); DEVPRINTF((child, "ccr_read of %#x (%#x) is %#x\n", offset, devi->pf->pf_ccr_offset, *val)); return 0; } static int pccard_ccr_write_impl(device_t brdev, device_t child, uint32_t offset, uint8_t val) { struct pccard_ivar *devi = PCCARD_IVAR(child); struct pccard_function *pf = devi->pf; /* * Can't use pccard_ccr_write since client drivers may access * registers not contained in the 'mask' if they are non-standard. */ DEVPRINTF((child, "ccr_write of %#x to %#x (%#x)\n", val, offset, devi->pf->pf_ccr_offset)); bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh, pf->pf_ccr_offset + offset, val); return 0; } static device_method_t pccard_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pccard_probe), DEVMETHOD(device_attach, pccard_attach), DEVMETHOD(device_detach, pccard_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, pccard_suspend), DEVMETHOD(device_resume, pccard_resume), /* Bus interface */ DEVMETHOD(bus_print_child, pccard_print_child), DEVMETHOD(bus_driver_added, pccard_driver_added), DEVMETHOD(bus_child_detached, pccard_child_detached), DEVMETHOD(bus_alloc_resource, pccard_alloc_resource), DEVMETHOD(bus_release_resource, pccard_release_resource), DEVMETHOD(bus_activate_resource, pccard_activate_resource), DEVMETHOD(bus_deactivate_resource, pccard_deactivate_resource), DEVMETHOD(bus_setup_intr, pccard_setup_intr), DEVMETHOD(bus_teardown_intr, pccard_teardown_intr), DEVMETHOD(bus_set_resource, pccard_set_resource), DEVMETHOD(bus_get_resource, pccard_get_resource), DEVMETHOD(bus_delete_resource, pccard_delete_resource), DEVMETHOD(bus_probe_nomatch, pccard_probe_nomatch), DEVMETHOD(bus_read_ivar, pccard_read_ivar), - DEVMETHOD(bus_child_pnpinfo_str, pccard_child_pnpinfo_str), - DEVMETHOD(bus_child_location_str, pccard_child_location_str), + DEVMETHOD(bus_child_pnpinfo, pccard_child_pnpinfo), + DEVMETHOD(bus_child_location, pccard_child_location), /* Card Interface */ DEVMETHOD(card_set_res_flags, pccard_set_res_flags), DEVMETHOD(card_set_memory_offset, pccard_set_memory_offset), DEVMETHOD(card_attach_card, pccard_attach_card), DEVMETHOD(card_detach_card, pccard_detach_card), DEVMETHOD(card_do_product_lookup, pccard_do_product_lookup), DEVMETHOD(card_cis_scan, pccard_scan_cis), 
DEVMETHOD(card_attr_read, pccard_attr_read_impl), DEVMETHOD(card_attr_write, pccard_attr_write_impl), DEVMETHOD(card_ccr_read, pccard_ccr_read_impl), DEVMETHOD(card_ccr_write, pccard_ccr_write_impl), { 0, 0 } }; static driver_t pccard_driver = { "pccard", pccard_methods, sizeof(struct pccard_softc) }; devclass_t pccard_devclass; /* Maybe we need to have a slot device? */ DRIVER_MODULE(pccard, pcic, pccard_driver, pccard_devclass, 0, 0); DRIVER_MODULE(pccard, cbb, pccard_driver, pccard_devclass, 0, 0); MODULE_VERSION(pccard, 1); diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c index ef138e926b6f..c215083d3121 100644 --- a/sys/dev/pci/pci.c +++ b/sys/dev/pci/pci.c @@ -1,6771 +1,6770 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include "opt_iommu.h" #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) #include #endif #include #include #include #include #ifdef PCI_IOV #include #include #endif #include #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #define PCIR_IS_BIOS(cfg, reg) \ (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \ ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1)) static int pci_has_quirk(uint32_t devid, int quirk); static pci_addr_t pci_mapbase(uint64_t mapreg); static const char *pci_maptype(uint64_t mapreg); static int pci_maprange(uint64_t mapreg); static pci_addr_t pci_rombase(uint64_t mapreg); static int pci_romsize(uint64_t testval); static void pci_fixancient(pcicfgregs *cfg); static int pci_printf(pcicfgregs *cfg, const char *fmt, ...); static int pci_porten(device_t dev); static int pci_memen(device_t dev); static void pci_assign_interrupt(device_t bus, device_t dev, int force_route); static int pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, int force, int prefetch); static int pci_probe(device_t dev); static void pci_load_vendor_data(void); static int pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc); static char *pci_describe_device(device_t dev); static int pci_modevent(module_t mod, int what, void *arg); static void pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg); static void pci_read_cap(device_t pcib, pcicfgregs *cfg); static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data); #if 0 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data); #endif static void pci_read_vpd(device_t pcib, pcicfgregs *cfg); static void pci_mask_msix(device_t dev, u_int index); static void pci_unmask_msix(device_t dev, u_int index); static int pci_msi_blacklisted(void); static int pci_msix_blacklisted(void); static void pci_resume_msi(device_t dev); static void pci_resume_msix(device_t dev); static int pci_remap_intr_method(device_t bus, device_t dev, u_int irq); static void pci_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp); static int pci_reset_post(device_t dev, device_t child); static int pci_reset_prepare(device_t dev, device_t child); static int pci_reset_child(device_t dev, device_t child, int flags); static int pci_get_id_method(device_t dev, device_t child, enum pci_id_type type, uintptr_t *rid); static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f, uint16_t vid, uint16_t did); static device_method_t pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pci_probe), DEVMETHOD(device_attach, pci_attach), DEVMETHOD(device_detach, pci_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, pci_resume), /* Bus interface */ DEVMETHOD(bus_print_child, pci_print_child), DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch), DEVMETHOD(bus_read_ivar, pci_read_ivar), DEVMETHOD(bus_write_ivar, pci_write_ivar), DEVMETHOD(bus_driver_added, pci_driver_added), DEVMETHOD(bus_setup_intr, pci_setup_intr), DEVMETHOD(bus_teardown_intr, pci_teardown_intr), 
DEVMETHOD(bus_reset_prepare, pci_reset_prepare), DEVMETHOD(bus_reset_post, pci_reset_post), DEVMETHOD(bus_reset_child, pci_reset_child), DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag), DEVMETHOD(bus_get_resource_list,pci_get_resource_list), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, pci_delete_resource), DEVMETHOD(bus_alloc_resource, pci_alloc_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, pci_release_resource), DEVMETHOD(bus_activate_resource, pci_activate_resource), DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource), DEVMETHOD(bus_child_deleted, pci_child_deleted), DEVMETHOD(bus_child_detached, pci_child_detached), - DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method), - DEVMETHOD(bus_child_location_str, pci_child_location_str_method), + DEVMETHOD(bus_child_pnpinfo, pci_child_pnpinfo_method), + DEVMETHOD(bus_child_location, pci_child_location_method), DEVMETHOD(bus_hint_device_unit, pci_hint_device_unit), DEVMETHOD(bus_remap_intr, pci_remap_intr_method), DEVMETHOD(bus_suspend_child, pci_suspend_child), DEVMETHOD(bus_resume_child, pci_resume_child), DEVMETHOD(bus_rescan, pci_rescan_method), /* PCI interface */ DEVMETHOD(pci_read_config, pci_read_config_method), DEVMETHOD(pci_write_config, pci_write_config_method), DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method), DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method), DEVMETHOD(pci_enable_io, pci_enable_io_method), DEVMETHOD(pci_disable_io, pci_disable_io_method), DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method), DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method), DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method), DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method), DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method), DEVMETHOD(pci_find_cap, pci_find_cap_method), DEVMETHOD(pci_find_next_cap, pci_find_next_cap_method), DEVMETHOD(pci_find_extcap, pci_find_extcap_method), DEVMETHOD(pci_find_next_extcap, pci_find_next_extcap_method), DEVMETHOD(pci_find_htcap, pci_find_htcap_method), DEVMETHOD(pci_find_next_htcap, pci_find_next_htcap_method), DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method), DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method), DEVMETHOD(pci_enable_msi, pci_enable_msi_method), DEVMETHOD(pci_enable_msix, pci_enable_msix_method), DEVMETHOD(pci_disable_msi, pci_disable_msi_method), DEVMETHOD(pci_remap_msix, pci_remap_msix_method), DEVMETHOD(pci_release_msi, pci_release_msi_method), DEVMETHOD(pci_msi_count, pci_msi_count_method), DEVMETHOD(pci_msix_count, pci_msix_count_method), DEVMETHOD(pci_msix_pba_bar, pci_msix_pba_bar_method), DEVMETHOD(pci_msix_table_bar, pci_msix_table_bar_method), DEVMETHOD(pci_get_id, pci_get_id_method), DEVMETHOD(pci_alloc_devinfo, pci_alloc_devinfo_method), DEVMETHOD(pci_child_added, pci_child_added_method), #ifdef PCI_IOV DEVMETHOD(pci_iov_attach, pci_iov_attach_method), DEVMETHOD(pci_iov_detach, pci_iov_detach_method), DEVMETHOD(pci_create_iov_child, pci_create_iov_child_method), #endif DEVMETHOD_END }; DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc)); static devclass_t pci_devclass; EARLY_DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL, BUS_PASS_BUS); MODULE_VERSION(pci, 1); static char *pci_vendordata; static size_t pci_vendordata_size; struct pci_quirk { uint32_t devid; /* Vendor/device of the card */ int type; #define 
PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */ #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */ #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */ #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */ #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */ #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */ #define PCI_QUIRK_REALLOC_BAR 7 /* Can't allocate memory at the default address */ int arg1; int arg2; }; static const struct pci_quirk pci_quirks[] = { /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */ { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 }, { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 }, /* As does the Serverworks OSB4 (the SMBus mapping register) */ { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 }, /* * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge * or the CMIC-SL (AKA ServerWorks GC_LE). */ { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI doesn't work on earlier Intel chipsets including * E7500, E7501, E7505, 845, 865, 875/E7210, and 855. */ { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI doesn't work with devices behind the AMD 8131 HT-PCIX * bridge. */ { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * Some virtualization environments emulate an older chipset * but support MSI just fine. QEMU uses the Intel 82440. */ { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 }, /* * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus * controller depending on SoftPciRst register (PM_IO 0x55 [7]). * It prevents us from attaching hpet(4) when the bit is unset. * Note this quirk only affects SB600 revision A13 and earlier. * For SB600 A21 and later, firmware must set the bit to hide it. * For SB700 and later, it is unused and hardcoded to zero. */ { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 }, /* * Atheros AR8161/AR8162/E2200/E2400/E2500 Ethernet controllers have * a bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit * of the command register is set. */ { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0B11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't * issue MSI interrupts with PCIM_CMD_INTxDIS set either. */ { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */ { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */ { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */ { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */ { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */ { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */ /* * HPE Gen 10 VGA has a memory range that can't be allocated in the * expected place. 
*/ { 0x98741002, PCI_QUIRK_REALLOC_BAR, 0, 0 }, { 0 } }; /* map register information */ #define PCI_MAPMEM 0x01 /* memory map */ #define PCI_MAPMEMP 0x02 /* prefetchable memory map */ #define PCI_MAPPORT 0x04 /* port map */ struct devlist pci_devq; uint32_t pci_generation; uint32_t pci_numdevs = 0; static int pcie_chipset, pcix_chipset; /* sysctl vars */ SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "PCI bus tuning parameters"); static int pci_enable_io_modes = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN, &pci_enable_io_modes, 1, "Enable I/O and memory bits in the config register. Some BIOSes do not" " enable these bits correctly. We'd like to do this all the time, but" " there are some peripherals that this causes problems with."); static int pci_do_realloc_bars = 1; SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN, &pci_do_realloc_bars, 0, "Attempt to allocate a new range for any BARs whose original " "firmware-assigned ranges fail to allocate during the initial device scan."); static int pci_do_power_nodriver = 0; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN, &pci_do_power_nodriver, 0, "Place a function into D3 state when no driver attaches to it. 0 means" " disable. 1 means conservatively place devices into D3 state. 2 means" " aggressively place devices into D3 state. 3 means put absolutely" " everything in D3 state."); int pci_do_power_resume = 1; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN, &pci_do_power_resume, 1, "Transition from D3 -> D0 on resume."); int pci_do_power_suspend = 1; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN, &pci_do_power_suspend, 1, "Transition from D0 -> D3 on suspend."); static int pci_do_msi = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1, "Enable support for MSI interrupts"); static int pci_do_msix = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1, "Enable support for MSI-X interrupts"); static int pci_msix_rewrite_table = 0; SYSCTL_INT(_hw_pci, OID_AUTO, msix_rewrite_table, CTLFLAG_RWTUN, &pci_msix_rewrite_table, 0, "Rewrite entire MSI-X table when updating MSI-X entries"); static int pci_honor_msi_blacklist = 1; SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN, &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X"); #if defined(__i386__) || defined(__amd64__) static int pci_usb_takeover = 1; #else static int pci_usb_takeover = 0; #endif SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN, &pci_usb_takeover, 1, "Enable early takeover of USB controllers. 
Disable this if you depend on" " BIOS emulation of USB devices, that is you use USB devices (like" " keyboard or mouse) but do not load USB drivers"); static int pci_clear_bars; SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0, "Ignore firmware-assigned resources for BARs."); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static int pci_clear_buses; SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0, "Ignore firmware-assigned bus numbers."); #endif static int pci_enable_ari = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari, 0, "Enable support for PCIe Alternative RID Interpretation"); int pci_enable_aspm = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_aspm, CTLFLAG_RDTUN, &pci_enable_aspm, 0, "Enable support for PCIe Active State Power Management"); static int pci_clear_aer_on_attach = 0; SYSCTL_INT(_hw_pci, OID_AUTO, clear_aer_on_attach, CTLFLAG_RWTUN, &pci_clear_aer_on_attach, 0, "Clear port and device AER state on driver attach"); static int pci_has_quirk(uint32_t devid, int quirk) { const struct pci_quirk *q; for (q = &pci_quirks[0]; q->devid; q++) { if (q->devid == devid && q->type == quirk) return (1); } return (0); } /* Find a device_t by bus/slot/function in domain 0 */ device_t pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func) { return (pci_find_dbsf(0, bus, slot, func)); } /* Find a device_t by domain/bus/slot/function */ device_t pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func) { struct pci_devinfo *dinfo = NULL; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if ((dinfo->cfg.domain == domain) && (dinfo->cfg.bus == bus) && (dinfo->cfg.slot == slot) && (dinfo->cfg.func == func)) { break; } } return (dinfo != NULL ? dinfo->cfg.dev : NULL); } /* Find a device_t by vendor/device ID */ device_t pci_find_device(uint16_t vendor, uint16_t device) { struct pci_devinfo *dinfo; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if ((dinfo->cfg.vendor == vendor) && (dinfo->cfg.device == device)) { return (dinfo->cfg.dev); } } return (NULL); } device_t pci_find_class(uint8_t class, uint8_t subclass) { struct pci_devinfo *dinfo; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if (dinfo->cfg.baseclass == class && dinfo->cfg.subclass == subclass) { return (dinfo->cfg.dev); } } return (NULL); } device_t pci_find_class_from(uint8_t class, uint8_t subclass, device_t from) { struct pci_devinfo *dinfo; bool found = false; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if (from != NULL && found == false) { if (from != dinfo->cfg.dev) continue; found = true; continue; } if (dinfo->cfg.baseclass == class && dinfo->cfg.subclass == subclass) { return (dinfo->cfg.dev); } } return (NULL); } static int pci_printf(pcicfgregs *cfg, const char *fmt, ...) 
{ va_list ap; int retval; retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot, cfg->func); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } /* return base address of memory or port map */ static pci_addr_t pci_mapbase(uint64_t mapreg) { if (PCI_BAR_MEM(mapreg)) return (mapreg & PCIM_BAR_MEM_BASE); else return (mapreg & PCIM_BAR_IO_BASE); } /* return map type of memory or port map */ static const char * pci_maptype(uint64_t mapreg) { if (PCI_BAR_IO(mapreg)) return ("I/O Port"); if (mapreg & PCIM_BAR_MEM_PREFETCH) return ("Prefetchable Memory"); return ("Memory"); } /* return log2 of map size decoded for memory or port map */ int pci_mapsize(uint64_t testval) { int ln2size; testval = pci_mapbase(testval); ln2size = 0; if (testval != 0) { while ((testval & 1) == 0) { ln2size++; testval >>= 1; } } return (ln2size); } /* return base address of device ROM */ static pci_addr_t pci_rombase(uint64_t mapreg) { return (mapreg & PCIM_BIOS_ADDR_MASK); } /* return log2 of map size decoded for device ROM */ static int pci_romsize(uint64_t testval) { int ln2size; testval = pci_rombase(testval); ln2size = 0; if (testval != 0) { while ((testval & 1) == 0) { ln2size++; testval >>= 1; } } return (ln2size); } /* return log2 of address range supported by map register */ static int pci_maprange(uint64_t mapreg) { int ln2range = 0; if (PCI_BAR_IO(mapreg)) ln2range = 32; else switch (mapreg & PCIM_BAR_MEM_TYPE) { case PCIM_BAR_MEM_32: ln2range = 32; break; case PCIM_BAR_MEM_1MB: ln2range = 20; break; case PCIM_BAR_MEM_64: ln2range = 64; break; } return (ln2range); } /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */ static void pci_fixancient(pcicfgregs *cfg) { if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL) return; /* PCI to PCI bridges use header type 1 */ if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI) cfg->hdrtype = PCIM_HDRTYPE_BRIDGE; } /* extract header type specific config data */ static void pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg) { #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: cfg->subvendor = REG(PCIR_SUBVEND_0, 2); cfg->subdevice = REG(PCIR_SUBDEV_0, 2); cfg->mingnt = REG(PCIR_MINGNT, 1); cfg->maxlat = REG(PCIR_MAXLAT, 1); cfg->nummaps = PCI_MAXMAPS_0; break; case PCIM_HDRTYPE_BRIDGE: cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1); cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1); cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1); cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1); cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2); cfg->nummaps = PCI_MAXMAPS_1; break; case PCIM_HDRTYPE_CARDBUS: cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1); cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1); cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1); cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1); cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2); cfg->subvendor = REG(PCIR_SUBVEND_2, 2); cfg->subdevice = REG(PCIR_SUBDEV_2, 2); cfg->nummaps = PCI_MAXMAPS_2; break; } #undef REG } /* read configuration header into pcicfgregs structure */ struct pci_devinfo * pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f) { #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) uint16_t vid, did; vid = REG(PCIR_VENDOR, 2); did = REG(PCIR_DEVICE, 2); if (vid != 0xffff) return (pci_fill_devinfo(pcib, bus, d, b, s, f, vid, did)); return (NULL); } struct pci_devinfo * pci_alloc_devinfo_method(device_t dev) { return
(malloc(sizeof(struct pci_devinfo), M_DEVBUF, M_WAITOK | M_ZERO)); } static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f, uint16_t vid, uint16_t did) { struct pci_devinfo *devlist_entry; pcicfgregs *cfg; devlist_entry = PCI_ALLOC_DEVINFO(bus); cfg = &devlist_entry->cfg; cfg->domain = d; cfg->bus = b; cfg->slot = s; cfg->func = f; cfg->vendor = vid; cfg->device = did; cfg->cmdreg = REG(PCIR_COMMAND, 2); cfg->statreg = REG(PCIR_STATUS, 2); cfg->baseclass = REG(PCIR_CLASS, 1); cfg->subclass = REG(PCIR_SUBCLASS, 1); cfg->progif = REG(PCIR_PROGIF, 1); cfg->revid = REG(PCIR_REVID, 1); cfg->hdrtype = REG(PCIR_HDRTYPE, 1); cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1); cfg->lattimer = REG(PCIR_LATTIMER, 1); cfg->intpin = REG(PCIR_INTPIN, 1); cfg->intline = REG(PCIR_INTLINE, 1); cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0; cfg->hdrtype &= ~PCIM_MFDEV; STAILQ_INIT(&cfg->maps); cfg->iov = NULL; pci_fixancient(cfg); pci_hdrtypedata(pcib, b, s, f, cfg); if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) pci_read_cap(pcib, cfg); STAILQ_INSERT_TAIL(&pci_devq, devlist_entry, pci_links); devlist_entry->conf.pc_sel.pc_domain = cfg->domain; devlist_entry->conf.pc_sel.pc_bus = cfg->bus; devlist_entry->conf.pc_sel.pc_dev = cfg->slot; devlist_entry->conf.pc_sel.pc_func = cfg->func; devlist_entry->conf.pc_hdr = cfg->hdrtype; devlist_entry->conf.pc_subvendor = cfg->subvendor; devlist_entry->conf.pc_subdevice = cfg->subdevice; devlist_entry->conf.pc_vendor = cfg->vendor; devlist_entry->conf.pc_device = cfg->device; devlist_entry->conf.pc_class = cfg->baseclass; devlist_entry->conf.pc_subclass = cfg->subclass; devlist_entry->conf.pc_progif = cfg->progif; devlist_entry->conf.pc_revid = cfg->revid; pci_numdevs++; pci_generation++; return (devlist_entry); } #undef REG static void pci_ea_fill_info(device_t pcib, pcicfgregs *cfg) { #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, \ cfg->ea.ea_location + (n), w) int num_ent; int ptr; int a, b; uint32_t val; int ent_size; uint32_t dw[4]; uint64_t base, max_offset; struct pci_ea_entry *eae; if (cfg->ea.ea_location == 0) return; STAILQ_INIT(&cfg->ea.ea_entries); /* Determine the number of entries */ num_ent = REG(PCIR_EA_NUM_ENT, 2); num_ent &= PCIM_EA_NUM_ENT_MASK; /* Find the first entry to take care of */ ptr = PCIR_EA_FIRST_ENT; /* Skip DWORD 2 for type 1 functions */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) ptr += 4; for (a = 0; a < num_ent; a++) { eae = malloc(sizeof(*eae), M_DEVBUF, M_WAITOK | M_ZERO); eae->eae_cfg_offset = cfg->ea.ea_location + ptr; /* Read the number of dwords in the entry */ val = REG(ptr, 4); ptr += 4; ent_size = (val & PCIM_EA_ES); for (b = 0; b < ent_size; b++) { dw[b] = REG(ptr, 4); ptr += 4; } eae->eae_flags = val; eae->eae_bei = (PCIM_EA_BEI & val) >> PCIM_EA_BEI_OFFSET; base = dw[0] & PCIM_EA_FIELD_MASK; max_offset = dw[1] | ~PCIM_EA_FIELD_MASK; b = 2; if (((dw[0] & PCIM_EA_IS_64) != 0) && (b < ent_size)) { base |= (uint64_t)dw[b] << 32UL; b++; } if (((dw[1] & PCIM_EA_IS_64) != 0) && (b < ent_size)) { max_offset |= (uint64_t)dw[b] << 32UL; b++; } eae->eae_base = base; eae->eae_max_offset = max_offset; STAILQ_INSERT_TAIL(&cfg->ea.ea_entries, eae, eae_link); if (bootverbose) { printf("PCI(EA) dev %04x:%04x, bei %d, flags #%x, base #%jx, max_offset #%jx\n", cfg->vendor, cfg->device, eae->eae_bei, eae->eae_flags, (uintmax_t)eae->eae_base, (uintmax_t)eae->eae_max_offset); } } } #undef REG static void pci_read_cap(device_t pcib, pcicfgregs *cfg) { #define REG(n, w)
PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w) #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w) #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) uint64_t addr; #endif uint32_t val; int ptr, nextptr, ptrptr; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: case PCIM_HDRTYPE_BRIDGE: ptrptr = PCIR_CAP_PTR; break; case PCIM_HDRTYPE_CARDBUS: ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */ break; default: return; /* no extended capabilities support */ } nextptr = REG(ptrptr, 1); /* sanity check? */ /* * Read capability entries. */ while (nextptr != 0) { /* Sanity check */ if (nextptr > 255) { printf("illegal PCI extended capability offset %d\n", nextptr); return; } /* Find the next entry */ ptr = nextptr; nextptr = REG(ptr + PCICAP_NEXTPTR, 1); /* Process this entry */ switch (REG(ptr + PCICAP_ID, 1)) { case PCIY_PMG: /* PCI power management */ if (cfg->pp.pp_cap == 0) { cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2); cfg->pp.pp_status = ptr + PCIR_POWER_STATUS; cfg->pp.pp_bse = ptr + PCIR_POWER_BSE; if ((nextptr - ptr) > PCIR_POWER_DATA) cfg->pp.pp_data = ptr + PCIR_POWER_DATA; } break; case PCIY_HT: /* HyperTransport */ /* Determine HT-specific capability type. */ val = REG(ptr + PCIR_HT_COMMAND, 2); if ((val & 0xe000) == PCIM_HTCAP_SLAVE) cfg->ht.ht_slave = ptr; #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) switch (val & PCIM_HTCMD_CAP_MASK) { case PCIM_HTCAP_MSI_MAPPING: if (!(val & PCIM_HTCMD_MSI_FIXED)) { /* Sanity check the mapping window. */ addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4); addr <<= 32; addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4); if (addr != MSI_INTEL_ADDR_BASE) device_printf(pcib, "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n", cfg->domain, cfg->bus, cfg->slot, cfg->func, (long long)addr); } else addr = MSI_INTEL_ADDR_BASE; cfg->ht.ht_msimap = ptr; cfg->ht.ht_msictrl = val; cfg->ht.ht_msiaddr = addr; break; } #endif break; case PCIY_MSI: /* PCI MSI */ cfg->msi.msi_location = ptr; cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2); cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl & PCIM_MSICTRL_MMC_MASK)>>1); break; case PCIY_MSIX: /* PCI MSI-X */ cfg->msix.msix_location = ptr; cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2); cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1; val = REG(ptr + PCIR_MSIX_TABLE, 4); cfg->msix.msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK; val = REG(ptr + PCIR_MSIX_PBA, 4); cfg->msix.msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK; break; case PCIY_VPD: /* PCI Vital Product Data */ cfg->vpd.vpd_reg = ptr; break; case PCIY_SUBVENDOR: /* Should always be true. */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) { val = REG(ptr + PCIR_SUBVENDCAP_ID, 4); cfg->subvendor = val & 0xffff; cfg->subdevice = val >> 16; } break; case PCIY_PCIX: /* PCI-X */ /* * Assume we have a PCI-X chipset if we have * at least one PCI-PCI bridge with a PCI-X * capability. Note that some systems with * PCI-express or HT chipsets might match on * this check as well. */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) pcix_chipset = 1; cfg->pcix.pcix_location = ptr; break; case PCIY_EXPRESS: /* PCI-express */ /* * Assume we have a PCI-express chipset if we have * at least one PCI-express device. 
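	 * (The pcie_chipset flag set below is consulted later, e.g.
	 * when deciding whether the MSI blacklist should apply; see
	 * pci_msi_blacklisted().)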
*/ pcie_chipset = 1; cfg->pcie.pcie_location = ptr; val = REG(ptr + PCIER_FLAGS, 2); cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE; break; case PCIY_EA: /* Enhanced Allocation */ cfg->ea.ea_location = ptr; pci_ea_fill_info(pcib, cfg); break; default: break; } } #if defined(__powerpc__) /* * Enable the MSI mapping window for all HyperTransport * slaves. PCI-PCI bridges have their windows enabled via * PCIB_MAP_MSI(). */ if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 && !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) { device_printf(pcib, "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n", cfg->domain, cfg->bus, cfg->slot, cfg->func); cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl, 2); } #endif /* REG and WREG carry through to the next functions */ } /* * PCI Vital Product Data */ #define PCI_VPD_TIMEOUT 1000000 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data) { int count = PCI_VPD_TIMEOUT; KASSERT((reg & 3) == 0, ("VPD register must be 4 byte aligned")); WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2); while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) { if (--count < 0) return (ENXIO); DELAY(1); /* limit looping */ } *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4)); return (0); } #if 0 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data) { int count = PCI_VPD_TIMEOUT; KASSERT((reg & 3) == 0, ("VPD register must be 4 byte aligned")); WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4); WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2); while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) { if (--count < 0) return (ENXIO); DELAY(1); /* limit looping */ } return (0); } #endif #undef PCI_VPD_TIMEOUT struct vpd_readstate { device_t pcib; pcicfgregs *cfg; uint32_t val; int bytesinval; int off; uint8_t cksum; }; static int vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data) { uint32_t reg; uint8_t byte; if (vrs->bytesinval == 0) { if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg)) return (ENXIO); vrs->val = le32toh(reg); vrs->off += 4; byte = vrs->val & 0xff; vrs->bytesinval = 3; } else { vrs->val = vrs->val >> 8; byte = vrs->val & 0xff; vrs->bytesinval--; } vrs->cksum += byte; *data = byte; return (0); } static void pci_read_vpd(device_t pcib, pcicfgregs *cfg) { struct vpd_readstate vrs; int state; int name; int remain; int i; int alloc, off; /* alloc/off for RO/W arrays */ int cksumvalid; int dflen; uint8_t byte; uint8_t byte2; /* init vpd reader */ vrs.bytesinval = 0; vrs.off = 0; vrs.pcib = pcib; vrs.cfg = cfg; vrs.cksum = 0; state = 0; name = remain = i = 0; /* shut up stupid gcc */ alloc = off = 0; /* shut up stupid gcc */ dflen = 0; /* shut up stupid gcc */ cksumvalid = -1; while (state >= 0) { if (vpd_nextbyte(&vrs, &byte)) { state = -2; break; } #if 0 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \ "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val, vrs.off, vrs.bytesinval, byte, state, remain, name, i); #endif switch (state) { case 0: /* item name */ if (byte & 0x80) { if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } remain = byte2; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } remain |= byte2 << 8; name = byte & 0x7f; } else { remain = byte & 0x7; name = (byte >> 3) & 0xf; } if (vrs.off + remain - vrs.bytesinval > 0x8000) { pci_printf(cfg, "VPD data overflow, remain %#x\n", remain); state = -1; break; } switch (name) { case 0x2: /* String */ cfg->vpd.vpd_ident =
malloc(remain + 1, M_DEVBUF, M_WAITOK); i = 0; state = 1; break; case 0xf: /* End */ state = -1; break; case 0x10: /* VPD-R */ alloc = 8; off = 0; cfg->vpd.vpd_ros = malloc(alloc * sizeof(*cfg->vpd.vpd_ros), M_DEVBUF, M_WAITOK | M_ZERO); state = 2; break; case 0x11: /* VPD-W */ alloc = 8; off = 0; cfg->vpd.vpd_w = malloc(alloc * sizeof(*cfg->vpd.vpd_w), M_DEVBUF, M_WAITOK | M_ZERO); state = 5; break; default: /* Invalid data, abort */ state = -1; break; } break; case 1: /* Identifier String */ cfg->vpd.vpd_ident[i++] = byte; remain--; if (remain == 0) { cfg->vpd.vpd_ident[i] = '\0'; state = 0; } break; case 2: /* VPD-R Keyword Header */ if (off == alloc) { cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros, (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros), M_DEVBUF, M_WAITOK | M_ZERO); } cfg->vpd.vpd_ros[off].keyword[0] = byte; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } cfg->vpd.vpd_ros[off].keyword[1] = byte2; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } cfg->vpd.vpd_ros[off].len = dflen = byte2; if (dflen == 0 && strncmp(cfg->vpd.vpd_ros[off].keyword, "RV", 2) == 0) { /* * if this happens, we can't trust the rest * of the VPD. */ pci_printf(cfg, "bad keyword length: %d\n", dflen); cksumvalid = 0; state = -1; break; } else if (dflen == 0) { cfg->vpd.vpd_ros[off].value = malloc(1 * sizeof(*cfg->vpd.vpd_ros[off].value), M_DEVBUF, M_WAITOK); cfg->vpd.vpd_ros[off].value[0] = '\x00'; } else cfg->vpd.vpd_ros[off].value = malloc( (dflen + 1) * sizeof(*cfg->vpd.vpd_ros[off].value), M_DEVBUF, M_WAITOK); remain -= 3; i = 0; /* keep in sync w/ state 3's transitions */ if (dflen == 0 && remain == 0) state = 0; else if (dflen == 0) state = 2; else state = 3; break; case 3: /* VPD-R Keyword Value */ cfg->vpd.vpd_ros[off].value[i++] = byte; if (strncmp(cfg->vpd.vpd_ros[off].keyword, "RV", 2) == 0 && cksumvalid == -1) { if (vrs.cksum == 0) cksumvalid = 1; else { if (bootverbose) pci_printf(cfg, "bad VPD cksum, remain %hhu\n", vrs.cksum); cksumvalid = 0; state = -1; break; } } dflen--; remain--; /* keep in sync w/ state 2's transitions */ if (dflen == 0) cfg->vpd.vpd_ros[off++].value[i++] = '\0'; if (dflen == 0 && remain == 0) { cfg->vpd.vpd_rocnt = off; cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros, off * sizeof(*cfg->vpd.vpd_ros), M_DEVBUF, M_WAITOK | M_ZERO); state = 0; } else if (dflen == 0) state = 2; break; case 4: remain--; if (remain == 0) state = 0; break; case 5: /* VPD-W Keyword Header */ if (off == alloc) { cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w, (alloc *= 2) * sizeof(*cfg->vpd.vpd_w), M_DEVBUF, M_WAITOK | M_ZERO); } cfg->vpd.vpd_w[off].keyword[0] = byte; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } cfg->vpd.vpd_w[off].keyword[1] = byte2; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } cfg->vpd.vpd_w[off].len = dflen = byte2; cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval; cfg->vpd.vpd_w[off].value = malloc((dflen + 1) * sizeof(*cfg->vpd.vpd_w[off].value), M_DEVBUF, M_WAITOK); remain -= 3; i = 0; /* keep in sync w/ state 6's transitions */ if (dflen == 0 && remain == 0) state = 0; else if (dflen == 0) state = 5; else state = 6; break; case 6: /* VPD-W Keyword Value */ cfg->vpd.vpd_w[off].value[i++] = byte; dflen--; remain--; /* keep in sync w/ state 5's transitions */ if (dflen == 0) cfg->vpd.vpd_w[off++].value[i++] = '\0'; if (dflen == 0 && remain == 0) { cfg->vpd.vpd_wcnt = off; cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w, off * sizeof(*cfg->vpd.vpd_w), M_DEVBUF, M_WAITOK | M_ZERO); state = 0; } else if (dflen == 0) state = 5; break; default: pci_printf(cfg,
"invalid state: %d\n", state); state = -1; break; } } if (cksumvalid == 0 || state < -1) { /* read-only data bad, clean up */ if (cfg->vpd.vpd_ros != NULL) { for (off = 0; cfg->vpd.vpd_ros[off].value; off++) free(cfg->vpd.vpd_ros[off].value, M_DEVBUF); free(cfg->vpd.vpd_ros, M_DEVBUF); cfg->vpd.vpd_ros = NULL; } } if (state < -1) { /* I/O error, clean up */ pci_printf(cfg, "failed to read VPD data.\n"); if (cfg->vpd.vpd_ident != NULL) { free(cfg->vpd.vpd_ident, M_DEVBUF); cfg->vpd.vpd_ident = NULL; } if (cfg->vpd.vpd_w != NULL) { for (off = 0; cfg->vpd.vpd_w[off].value; off++) free(cfg->vpd.vpd_w[off].value, M_DEVBUF); free(cfg->vpd.vpd_w, M_DEVBUF); cfg->vpd.vpd_w = NULL; } } cfg->vpd.vpd_cached = 1; #undef REG #undef WREG } int pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(dev), cfg); *identptr = cfg->vpd.vpd_ident; if (*identptr == NULL) return (ENXIO); return (0); } int pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw, const char **vptr) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; int i; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(dev), cfg); for (i = 0; i < cfg->vpd.vpd_rocnt; i++) if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword, sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) { *vptr = cfg->vpd.vpd_ros[i].value; return (0); } *vptr = NULL; return (ENXIO); } struct pcicfg_vpd * pci_fetch_vpd_list(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg); return (&cfg->vpd); } /* * Find the requested HyperTransport capability and return the offset * in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. */ int pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg) { int ptr, error; uint16_t val; error = pci_find_cap(child, PCIY_HT, &ptr); if (error) return (error); /* * Traverse the capabilities list checking each HT capability * to see if it matches the requested HT capability. */ for (;;) { val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2); if (capability == PCIM_HTCAP_SLAVE || capability == PCIM_HTCAP_HOST) val &= 0xe000; else val &= PCIM_HTCMD_CAP_MASK; if (val == capability) { if (capreg != NULL) *capreg = ptr; return (0); } /* Skip to the next HT capability. */ if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0) break; } return (ENOENT); } /* * Find the next requested HyperTransport capability after start and return * the offset in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. */ int pci_find_next_htcap_method(device_t dev, device_t child, int capability, int start, int *capreg) { int ptr; uint16_t val; KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == PCIY_HT, ("start capability is not HyperTransport capability")); ptr = start; /* * Traverse the capabilities list checking each HT capability * to see if it matches the requested HT capability. */ for (;;) { /* Skip to the next HT capability. 
*/ if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0) break; val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2); if (capability == PCIM_HTCAP_SLAVE || capability == PCIM_HTCAP_HOST) val &= 0xe000; else val &= PCIM_HTCMD_CAP_MASK; if (val == capability) { if (capreg != NULL) *capreg = ptr; return (0); } } return (ENOENT); } /* * Find the requested capability and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_cap_method(device_t dev, device_t child, int capability, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t status; uint8_t ptr; /* * Check the CAP_LIST bit of the PCI status register first. */ status = pci_read_config(child, PCIR_STATUS, 2); if (!(status & PCIM_STATUS_CAPPRESENT)) return (ENXIO); /* * Determine the start pointer of the capabilities list. */ switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: case PCIM_HDRTYPE_BRIDGE: ptr = PCIR_CAP_PTR; break; case PCIM_HDRTYPE_CARDBUS: ptr = PCIR_CAP_PTR_2; break; default: /* XXX: panic? */ return (ENXIO); /* no extended capabilities support */ } ptr = pci_read_config(child, ptr, 1); /* * Traverse the capabilities list. */ while (ptr != 0) { if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1); } return (ENOENT); } /* * Find the next requested capability after start and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_next_cap_method(device_t dev, device_t child, int capability, int start, int *capreg) { uint8_t ptr; KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == capability, ("start capability is not expected capability")); ptr = pci_read_config(child, start + PCICAP_NEXTPTR, 1); while (ptr != 0) { if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1); } return (ENOENT); } /* * Find the requested extended capability and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_extcap_method(device_t dev, device_t child, int capability, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t ecap; uint16_t ptr; /* Only supported for PCI-express devices. */ if (cfg->pcie.pcie_location == 0) return (ENXIO); ptr = PCIR_EXTCAP; ecap = pci_read_config(child, ptr, 4); if (ecap == 0xffffffff || ecap == 0) return (ENOENT); for (;;) { if (PCI_EXTCAP_ID(ecap) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = PCI_EXTCAP_NEXTPTR(ecap); if (ptr == 0) break; ecap = pci_read_config(child, ptr, 4); } return (ENOENT); } /* * Find the next requested extended capability after start and return the * offset in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. */ int pci_find_next_extcap_method(device_t dev, device_t child, int capability, int start, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t ecap; uint16_t ptr; /* Only supported for PCI-express devices. 
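From a driver's perspective the lookup routines above are reached through pci_find_cap() and pci_find_extcap(). A short usage sketch (PCIZ_AER chosen purely as an example of an extended capability ID):

static void
example_find_caps(device_t dev)
{
	int cap;

	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0)
		device_printf(dev, "PCIe capability at 0x%x\n", cap);
	if (pci_find_extcap(dev, PCIZ_AER, &cap) == 0)
		device_printf(dev, "AER extended capability at 0x%x\n", cap);
}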
*/ if (cfg->pcie.pcie_location == 0) return (ENXIO); ecap = pci_read_config(child, start, 4); KASSERT(PCI_EXTCAP_ID(ecap) == capability, ("start extended capability is not expected capability")); ptr = PCI_EXTCAP_NEXTPTR(ecap); while (ptr != 0) { ecap = pci_read_config(child, ptr, 4); if (PCI_EXTCAP_ID(ecap) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = PCI_EXTCAP_NEXTPTR(ecap); } return (ENOENT); } /* * Support for MSI-X message interrupts. */ static void pci_write_msix_entry(device_t dev, u_int index, uint64_t address, uint32_t data) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_table_offset + index * 16; bus_write_4(msix->msix_table_res, offset, address & 0xffffffff); bus_write_4(msix->msix_table_res, offset + 4, address >> 32); bus_write_4(msix->msix_table_res, offset + 8, data); } void pci_enable_msix_method(device_t dev, device_t child, u_int index, uint64_t address, uint32_t data) { if (pci_msix_rewrite_table) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; /* * Some VM hosts require MSI-X to be disabled in the * control register before updates to the MSI-X table * entries are allowed. It is not enough to disable * MSI-X only while updating a single entry; it must be * disabled while updating all entries in the table. */ pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl & ~PCIM_MSIXCTRL_MSIX_ENABLE, 2); pci_resume_msix(child); } else pci_write_msix_entry(child, index, address, data); /* Enable MSI -> HT mapping. */ pci_ht_map_msi(child, address); } void pci_mask_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, val; KASSERT(msix->msix_msgnum > index, ("bogus index")); offset = msix->msix_table_offset + index * 16 + 12; val = bus_read_4(msix->msix_table_res, offset); val |= PCIM_MSIX_VCTRL_MASK; /* * Some devices (e.g. Samsung PM961) do not support reads of this * register, so always write the new value. */ bus_write_4(msix->msix_table_res, offset, val); } void pci_unmask_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, val; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_table_offset + index * 16 + 12; val = bus_read_4(msix->msix_table_res, offset); val &= ~PCIM_MSIX_VCTRL_MASK; /* * Some devices (e.g. Samsung PM961) do not support reads of this * register, so always write the new value. */ bus_write_4(msix->msix_table_res, offset, val); } int pci_pending_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, bit; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_pba_offset + (index / 32) * 4; bit = 1 << index % 32; return (bus_read_4(msix->msix_pba_res, offset) & bit); } /* * Restore MSI-X registers and table during resume. If MSI-X is * enabled then walk the virtual table to restore the actual MSI-X * table. */ static void pci_resume_msix(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct msix_table_entry *mte; struct msix_vector *mv; int i; if (msix->msix_alloc > 0) { /* First, mask all vectors.
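For reference, a sketch of the 16-byte MSI-X table entry layout that pci_write_msix_entry() and the mask/unmask routines above rely on; the macro names here are illustrative, not taken from this file:

/* Offsets within one 16-byte MSI-X table entry. */
#define	EXAMPLE_MSIX_ENTRY_ADDR_LO	0	/* message address 31:0 */
#define	EXAMPLE_MSIX_ENTRY_ADDR_HI	4	/* message address 63:32 */
#define	EXAMPLE_MSIX_ENTRY_DATA		8	/* message data */
#define	EXAMPLE_MSIX_ENTRY_VCTRL	12	/* vector control; bit 0 masks */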
*/ for (i = 0; i < msix->msix_msgnum; i++) pci_mask_msix(dev, i); /* Second, program any messages with at least one handler. */ for (i = 0; i < msix->msix_table_len; i++) { mte = &msix->msix_table[i]; if (mte->mte_vector == 0 || mte->mte_handlers == 0) continue; mv = &msix->msix_vectors[mte->mte_vector - 1]; pci_write_msix_entry(dev, i, mv->mv_address, mv->mv_data); pci_unmask_msix(dev, i); } } pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl, 2); } /* * Attempt to allocate *count MSI-X messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at rid 1. */ int pci_alloc_msix_method(device_t dev, device_t child, int *count) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; int actual, error, i, irq, max; /* Don't let count == 0 get us into trouble. */ if (*count == 0) return (EINVAL); /* If rid 0 is allocated, then fail. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) return (ENXIO); /* Already have allocated messages? */ if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) return (ENXIO); /* If MSI-X is blacklisted for this system, fail. */ if (pci_msix_blacklisted()) return (ENXIO); /* MSI-X capability present? */ if (cfg->msix.msix_location == 0 || !pci_do_msix) return (ENODEV); /* Make sure the appropriate BARs are mapped. */ rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, cfg->msix.msix_table_bar); if (rle == NULL || rle->res == NULL || !(rman_get_flags(rle->res) & RF_ACTIVE)) return (ENXIO); cfg->msix.msix_table_res = rle->res; if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) { rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, cfg->msix.msix_pba_bar); if (rle == NULL || rle->res == NULL || !(rman_get_flags(rle->res) & RF_ACTIVE)) return (ENXIO); } cfg->msix.msix_pba_res = rle->res; if (bootverbose) device_printf(child, "attempting to allocate %d MSI-X vectors (%d supported)\n", *count, cfg->msix.msix_msgnum); max = min(*count, cfg->msix.msix_msgnum); for (i = 0; i < max; i++) { /* Allocate a message. */ error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq); if (error) { if (i == 0) return (error); break; } resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, irq, 1); } actual = i; if (bootverbose) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1); if (actual == 1) device_printf(child, "using IRQ %ju for MSI-X\n", rle->start); else { int run; /* * Be fancy and try to print contiguous runs of * IRQ values as ranges. 'irq' is the previous IRQ. * 'run' is true if we are in a range. */ device_printf(child, "using IRQs %ju", rle->start); irq = rle->start; run = 0; for (i = 1; i < actual; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); /* Still in a run? */ if (rle->start == irq + 1) { run = 1; irq++; continue; } /* Finish previous range. */ if (run) { printf("-%d", irq); run = 0; } /* Start new range. */ printf(",%ju", rle->start); irq = rle->start; } /* Unfinished range? */ if (run) printf("-%d", irq); printf(" for MSI-X\n"); } } /* Mask all vectors. */ for (i = 0; i < cfg->msix.msix_msgnum; i++) pci_mask_msix(child, i); /* Allocate and initialize vector data and virtual table. 
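A hedged driver-side sketch of the allocation path implemented above; error handling is abbreviated and the requested count of 4 is arbitrary:

static int
example_setup_msix(device_t dev)
{
	int count, error;

	count = 4;
	error = pci_alloc_msix(dev, &count);
	if (error != 0)
		return (error);
	/* On success, rids 1..count are valid SYS_RES_IRQ resources. */
	device_printf(dev, "allocated %d MSI-X vectors\n", count);
	return (0);
}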
*/ cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual, M_DEVBUF, M_WAITOK | M_ZERO); cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < actual; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); cfg->msix.msix_vectors[i].mv_irq = rle->start; cfg->msix.msix_table[i].mte_vector = i + 1; } /* Update control register to enable MSI-X. */ cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL, cfg->msix.msix_ctrl, 2); /* Update counts of alloc'd messages. */ cfg->msix.msix_alloc = actual; cfg->msix.msix_table_len = actual; *count = actual; return (0); } /* * By default, pci_alloc_msix() will assign the allocated IRQ * resources consecutively to the first N messages in the MSI-X table. * However, device drivers may want to use different layouts if they * either receive fewer messages than they asked for, or they wish to * populate the MSI-X table sparsely. This method allows the driver * to specify what layout it wants. It must be called after a * successful pci_alloc_msix() but before any of the associated * SYS_RES_IRQ resources are allocated via bus_alloc_resource(). * * The 'vectors' array contains 'count' message vectors. The array * maps directly to the MSI-X table in that index 0 in the array * specifies the vector for the first message in the MSI-X table, etc. * The vector value in each array index can either be 0 to indicate * that no vector should be assigned to a message slot, or it can be a * number from 1 to N (where N is the count returned from a * successful call to pci_alloc_msix()) to indicate which message * vector (IRQ) to use for the corresponding message. * * On successful return, each message with a non-zero vector will have * an associated SYS_RES_IRQ whose rid is equal to the array index + * 1. Additionally, if any of the IRQs allocated via the previous * call to pci_alloc_msix() are not used in the mapping, those IRQs * will be freed back to the system automatically. * * For example, suppose a driver has an MSI-X table with 6 messages and * asks for 6 messages, but pci_alloc_msix() only returns a count of * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and * C. After the call to pci_alloc_msix(), the device will be set up to * have an MSI-X table of ABC--- (where - means no vector assigned). * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 }, * then the MSI-X table will look like A-AB-B, and the 'C' vector will * be freed back to the system. This device will also have valid * SYS_RES_IRQ rids of 1, 3, 4, and 6. * * In any case, the SYS_RES_IRQ rid X will always map to the message * at MSI-X table index X - 1 and will only be valid if a vector is * assigned to that table entry. */ int pci_remap_msix_method(device_t dev, device_t child, int count, const u_int *vectors) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct resource_list_entry *rle; int i, irq, j, *used; /* * Have to have at least one message in the table but the * table can't be bigger than the actual MSI-X table in the * device. */ if (count == 0 || count > msix->msix_msgnum) return (EINVAL); /* Sanity check the vectors. */ for (i = 0; i < count; i++) if (vectors[i] > msix->msix_alloc) return (EINVAL); /* * Make sure there aren't any holes in the vectors to be used. * It's a big pain to support it, and it doesn't really make * sense anyway.
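Continuing the A-AB-B example from the comment above, the corresponding driver call would look roughly like this sketch:

static int
example_remap(device_t dev)
{
	/* Spread 3 allocated vectors sparsely over a 6-entry table. */
	static const u_int vectors[6] = { 1, 0, 1, 2, 0, 2 };

	return (pci_remap_msix(dev, 6, vectors));
}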
Also, at least one vector must be used. */ used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) if (vectors[i] != 0) used[vectors[i] - 1] = 1; for (i = 0; i < msix->msix_alloc - 1; i++) if (used[i] == 0 && used[i + 1] == 1) { free(used, M_DEVBUF); return (EINVAL); } if (used[0] != 1) { free(used, M_DEVBUF); return (EINVAL); } /* Make sure none of the resources are allocated. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; if (msix->msix_table[i].mte_handlers > 0) { free(used, M_DEVBUF); return (EBUSY); } rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing resource")); if (rle->res != NULL) { free(used, M_DEVBUF); return (EBUSY); } } /* Free the existing resource list entries. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); } /* * Build the new virtual table keeping track of which vectors are * used. */ free(msix->msix_table, M_DEVBUF); msix->msix_table = malloc(sizeof(struct msix_table_entry) * count, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) msix->msix_table[i].mte_vector = vectors[i]; msix->msix_table_len = count; /* Free any unused IRQs and resize the vectors array if necessary. */ j = msix->msix_alloc - 1; if (used[j] == 0) { struct msix_vector *vec; while (used[j] == 0) { PCIB_RELEASE_MSIX(device_get_parent(dev), child, msix->msix_vectors[j].mv_irq); j--; } vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF, M_WAITOK); bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) * (j + 1)); free(msix->msix_vectors, M_DEVBUF); msix->msix_vectors = vec; msix->msix_alloc = j + 1; } free(used, M_DEVBUF); /* Map the IRQs onto the rids. */ for (i = 0; i < count; i++) { if (vectors[i] == 0) continue; irq = msix->msix_vectors[vectors[i] - 1].mv_irq; resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, irq, 1); } if (bootverbose) { device_printf(child, "Remapped MSI-X IRQs as: "); for (i = 0; i < count; i++) { if (i != 0) printf(", "); if (vectors[i] == 0) printf("---"); else printf("%d", msix->msix_vectors[vectors[i] - 1].mv_irq); } printf("\n"); } return (0); } static int pci_release_msix(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct resource_list_entry *rle; int i; /* Do we have any messages to release? */ if (msix->msix_alloc == 0) return (ENODEV); /* Make sure none of the resources are allocated. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; if (msix->msix_table[i].mte_handlers > 0) return (EBUSY); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing resource")); if (rle->res != NULL) return (EBUSY); } /* Update control register to disable MSI-X. */ msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl, 2); /* Free the resource list entries. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); } free(msix->msix_table, M_DEVBUF); msix->msix_table_len = 0; /* Release the IRQs. 
*/ for (i = 0; i < msix->msix_alloc; i++) PCIB_RELEASE_MSIX(device_get_parent(dev), child, msix->msix_vectors[i].mv_irq); free(msix->msix_vectors, M_DEVBUF); msix->msix_alloc = 0; return (0); } /* * Return the max supported MSI-X messages this device supports. * Basically, assuming the MD code can alloc messages, this function * should return the maximum value that pci_alloc_msix() can return. * Thus, it is subject to the tunables, etc. */ int pci_msix_count_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_msgnum); return (0); } int pci_msix_pba_bar_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_pba_bar); return (-1); } int pci_msix_table_bar_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_table_bar); return (-1); } /* * HyperTransport MSI mapping control */ void pci_ht_map_msi(device_t dev, uint64_t addr) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_ht *ht = &dinfo->cfg.ht; if (!ht->ht_msimap) return; if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) && ht->ht_msiaddr >> 20 == addr >> 20) { /* Enable MSI -> HT mapping. */ ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, ht->ht_msictrl, 2); } if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) { /* Disable MSI -> HT mapping. */ ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE; pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, ht->ht_msictrl, 2); } } int pci_get_relaxed_ordering_enabled(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_RELAXED_ORD_ENABLE; return (val != 0); } int pci_get_max_payload(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_MAX_PAYLOAD; val >>= 5; return (1 << (val + 7)); } int pci_get_max_read_req(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_MAX_READ_REQUEST; val >>= 12; return (1 << (val + 7)); } int pci_set_max_read_req(device_t dev, int size) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); if (size < 128) size = 128; if (size > 4096) size = 4096; size = (1 << (fls(size) - 1)); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= ~PCIEM_CTL_MAX_READ_REQUEST; val |= (fls(size) - 8) << 12; pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2); return (size); } uint32_t pcie_read_config(device_t dev, int reg, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) { if (width == 2) return (0xffff); return (0xffffffff); } return (pci_read_config(dev, cap + reg, width)); } void pcie_write_config(device_t dev, int 
reg, uint32_t value, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return; pci_write_config(dev, cap + reg, value, width); } /* * Adjusts a PCI-e capability register by clearing the bits in mask * and setting the bits in (value & mask). Bits not set in mask are * not adjusted. * * Returns the old value on success or all ones on failure. */ uint32_t pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint32_t old, new; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) { if (width == 2) return (0xffff); return (0xffffffff); } old = pci_read_config(dev, cap + reg, width); new = old & ~mask; new |= (value & mask); pci_write_config(dev, cap + reg, new, width); return (old); } /* * Support for MSI message signalled interrupts. */ void pci_enable_msi_method(device_t dev, device_t child, uint64_t address, uint16_t data) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; /* Write data and address values. */ pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR, address & 0xffffffff, 4); if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH, address >> 32, 4); pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT, data, 2); } else pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data, 2); /* Enable MSI in the control register. */ msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); /* Enable MSI -> HT mapping. */ pci_ht_map_msi(child, address); } void pci_disable_msi_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; /* Disable MSI -> HT mapping. */ pci_ht_map_msi(child, 0); /* Disable MSI in the control register. */ msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); } /* * Restore MSI registers during resume. If MSI is enabled then * restore the data and address registers in addition to the control * register. */ static void pci_resume_msi(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msi *msi = &dinfo->cfg.msi; uint64_t address; uint16_t data; if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) { address = msi->msi_addr; data = msi->msi_data; pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR, address & 0xffffffff, 4); if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH, address >> 32, 4); pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT, data, 2); } else pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data, 2); } pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); } static int pci_remap_intr_method(device_t bus, device_t dev, u_int irq) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; struct msix_table_entry *mte; struct msix_vector *mv; uint64_t addr; uint32_t data; int error, i, j; /* * Handle MSI first. We try to find this IRQ among our list * of MSI IRQs. If we find it, we request updated address and * data registers and apply the results. */ if (cfg->msi.msi_alloc > 0) { /* If we don't have any active handlers, nothing to do. 
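A small sketch of pcie_adjust_config() in use, setting the relaxed ordering enable bit that pci_get_relaxed_ordering_enabled() above reads back. Note also that the payload and read-request fields handled by the neighboring helpers encode sizes as 1 << (field + 7), so a field value of 0 means 128 bytes and 5 means 4096 bytes.

static void
example_enable_relaxed_ordering(device_t dev)
{
	/* Read-modify-write of the PCIe device control register. */
	(void)pcie_adjust_config(dev, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE, PCIEM_CTL_RELAXED_ORD_ENABLE, 2);
}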
*/ if (cfg->msi.msi_handlers == 0) return (0); for (i = 0; i < cfg->msi.msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); if (rle->start == irq) { error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, &addr, &data); if (error) return (error); pci_disable_msi(dev); dinfo->cfg.msi.msi_addr = addr; dinfo->cfg.msi.msi_data = data; pci_enable_msi(dev, addr, data); return (0); } } return (ENOENT); } /* * For MSI-X, we check to see if we have this IRQ. If we do, * we request the updated mapping info. If that works, we go * through all the slots that use this IRQ and update them. */ if (cfg->msix.msix_alloc > 0) { for (i = 0; i < cfg->msix.msix_alloc; i++) { mv = &cfg->msix.msix_vectors[i]; if (mv->mv_irq == irq) { error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, &addr, &data); if (error) return (error); mv->mv_address = addr; mv->mv_data = data; for (j = 0; j < cfg->msix.msix_table_len; j++) { mte = &cfg->msix.msix_table[j]; if (mte->mte_vector != i + 1) continue; if (mte->mte_handlers == 0) continue; pci_mask_msix(dev, j); pci_enable_msix(dev, j, addr, data); pci_unmask_msix(dev, j); } } } return (ENOENT); } return (ENOENT); } /* * Returns true if the specified device is blacklisted because MSI * doesn't work. */ int pci_msi_device_blacklisted(device_t dev) { if (!pci_honor_msi_blacklist) return (0); return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI)); } /* * Determine if MSI is blacklisted globally on this system. Currently, * we just check for blacklisted chipsets as represented by the * host-PCI bridge at device 0:0:0. In the future, it may become * necessary to check other system attributes, such as the kenv values * that give the motherboard manufacturer and model number. */ static int pci_msi_blacklisted(void) { device_t dev; if (!pci_honor_msi_blacklist) return (0); /* Blacklist all non-PCI-express and non-PCI-X chipsets. */ if (!(pcie_chipset || pcix_chipset)) { if (vm_guest != VM_GUEST_NO) { /* * Whitelist older chipsets in virtual * machines known to support MSI. */ dev = pci_find_bsf(0, 0, 0); if (dev != NULL) return (!pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_ENABLE_MSI_VM)); } return (1); } dev = pci_find_bsf(0, 0, 0); if (dev != NULL) return (pci_msi_device_blacklisted(dev)); return (0); } /* * Returns true if the specified device is blacklisted because MSI-X * doesn't work. Note that this assumes that if MSI doesn't work, * MSI-X doesn't either. */ int pci_msix_device_blacklisted(device_t dev) { if (!pci_honor_msi_blacklist) return (0); if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX)) return (1); return (pci_msi_device_blacklisted(dev)); } /* * Determine if MSI-X is blacklisted globally on this system. If MSI * is blacklisted, assume that MSI-X is as well. Check for additional * chipsets where MSI works but MSI-X does not. */ static int pci_msix_blacklisted(void) { device_t dev; if (!pci_honor_msi_blacklist) return (0); dev = pci_find_bsf(0, 0, 0); if (dev != NULL && pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX)) return (1); return (pci_msi_blacklisted()); } /* * Attempt to allocate *count MSI messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at a rid 1. 
*/ int pci_alloc_msi_method(device_t dev, device_t child, int *count) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; int actual, error, i, irqs[32]; uint16_t ctrl; /* Don't let count == 0 get us into trouble. */ if (*count == 0) return (EINVAL); /* If rid 0 is allocated, then fail. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) return (ENXIO); /* Already have allocated messages? */ if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) return (ENXIO); /* If MSI is blacklisted for this system, fail. */ if (pci_msi_blacklisted()) return (ENXIO); /* MSI capability present? */ if (cfg->msi.msi_location == 0 || !pci_do_msi) return (ENODEV); if (bootverbose) device_printf(child, "attempting to allocate %d MSI vectors (%d supported)\n", *count, cfg->msi.msi_msgnum); /* Don't ask for more than the device supports. */ actual = min(*count, cfg->msi.msi_msgnum); /* Don't ask for more than 32 messages. */ actual = min(actual, 32); /* MSI requires power of 2 number of messages. */ if (!powerof2(actual)) return (EINVAL); for (;;) { /* Try to allocate N messages. */ error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual, actual, irqs); if (error == 0) break; if (actual == 1) return (error); /* Try N / 2. */ actual >>= 1; } /* * We now have N actual messages mapped onto SYS_RES_IRQ * resources in the irqs[] array, so add new resources * starting at rid 1. */ for (i = 0; i < actual; i++) resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irqs[i], irqs[i], 1); if (bootverbose) { if (actual == 1) device_printf(child, "using IRQ %d for MSI\n", irqs[0]); else { int run; /* * Be fancy and try to print contiguous runs * of IRQ values as ranges. 'run' is true if * we are in a range. */ device_printf(child, "using IRQs %d", irqs[0]); run = 0; for (i = 1; i < actual; i++) { /* Still in a run? */ if (irqs[i] == irqs[i - 1] + 1) { run = 1; continue; } /* Finish previous range. */ if (run) { printf("-%d", irqs[i - 1]); run = 0; } /* Start new range. */ printf(",%d", irqs[i]); } /* Unfinished range? */ if (run) printf("-%d", irqs[actual - 1]); printf(" for MSI\n"); } } /* Update control register with actual count. */ ctrl = cfg->msi.msi_ctrl; ctrl &= ~PCIM_MSICTRL_MME_MASK; ctrl |= (ffs(actual) - 1) << 4; cfg->msi.msi_ctrl = ctrl; pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2); /* Update counts of alloc'd messages. */ cfg->msi.msi_alloc = actual; cfg->msi.msi_handlers = 0; *count = actual; return (0); } /* Release the MSI messages associated with this device. */ int pci_release_msi_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; struct resource_list_entry *rle; int error, i, irqs[32]; /* Try MSI-X first. */ error = pci_release_msix(dev, child); if (error != ENODEV) return (error); /* Do we have any messages to release? */ if (msi->msi_alloc == 0) return (ENODEV); KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages")); /* Make sure none of the resources are allocated. */ if (msi->msi_handlers > 0) return (EBUSY); for (i = 0; i < msi->msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing MSI resource")); if (rle->res != NULL) return (EBUSY); irqs[i] = rle->start; } /* Update control register with 0 count. 
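A hedged sketch of the MSI allocation from the driver's point of view; since the loop above retries with half the count on failure, a request for 8 may come back as 4, 2, or 1:

static int
example_setup_msi(device_t dev)
{
	int count;

	count = 8;			/* must be a power of two */
	if (pci_alloc_msi(dev, &count) != 0)
		return (ENXIO);		/* caller falls back to INTx */
	device_printf(dev, "allocated %d MSI vectors\n", count);
	return (0);
}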
*/ KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE), ("%s: MSI still enabled", __func__)); msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); /* Release the messages. */ PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs); for (i = 0; i < msi->msi_alloc; i++) resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); /* Update alloc count. */ msi->msi_alloc = 0; msi->msi_addr = 0; msi->msi_data = 0; return (0); } /* * Return the maximum number of MSI messages this device supports. * Basically, assuming the MD code can alloc messages, this function * should return the maximum value that pci_alloc_msi() can return. * Thus, it is subject to the tunables, etc. */ int pci_msi_count_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; if (pci_do_msi && msi->msi_location != 0) return (msi->msi_msgnum); return (0); } /* Free the pcicfgregs structure and all dependent data structures. */ int pci_freecfg(struct pci_devinfo *dinfo) { struct devlist *devlist_head; struct pci_map *pm, *next; int i; devlist_head = &pci_devq; if (dinfo->cfg.vpd.vpd_reg) { free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF); for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++) free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF); free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF); for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++) free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF); free(dinfo->cfg.vpd.vpd_w, M_DEVBUF); } STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) { free(pm, M_DEVBUF); } STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links); free(dinfo, M_DEVBUF); /* increment the generation count */ pci_generation++; /* we're losing one device */ pci_numdevs--; return (0); } /* * PCI power management */ int pci_set_powerstate_method(device_t dev, device_t child, int state) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint16_t status; int oldstate, highest, delay; if (cfg->pp.pp_cap == 0) return (EOPNOTSUPP); /* * Optimize a no state change request away. While it would be OK to * write to the hardware in theory, some devices have shown odd * behavior when going from D3 -> D3. */ oldstate = pci_get_powerstate(child); if (oldstate == state) return (0); /* * The PCI power management specification states that after a state * transition between PCI power states, system software must * guarantee a minimal delay before the function accesses the device. * Compute the worst case delay that we need to guarantee before we * access the device. Many devices will be responsive much more * quickly than this delay, but there are some that don't respond * instantly to state changes. Transitions to/from D3 state require * 10ms, while D2 requires 200us, and D0/1 require none. The delay * is done below with DELAY rather than a sleeper function because * this function can be called from contexts where we cannot sleep. */ highest = (oldstate > state) ?
oldstate : state; if (highest == PCI_POWERSTATE_D3) delay = 10000; else if (highest == PCI_POWERSTATE_D2) delay = 200; else delay = 0; status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2) & ~PCIM_PSTAT_DMASK; switch (state) { case PCI_POWERSTATE_D0: status |= PCIM_PSTAT_D0; break; case PCI_POWERSTATE_D1: if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0) return (EOPNOTSUPP); status |= PCIM_PSTAT_D1; break; case PCI_POWERSTATE_D2: if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0) return (EOPNOTSUPP); status |= PCIM_PSTAT_D2; break; case PCI_POWERSTATE_D3: status |= PCIM_PSTAT_D3; break; default: return (EINVAL); } if (bootverbose) pci_printf(cfg, "Transition from D%d to D%d\n", oldstate, state); PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2); if (delay) DELAY(delay); return (0); } int pci_get_powerstate_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint16_t status; int result; if (cfg->pp.pp_cap != 0) { status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2); switch (status & PCIM_PSTAT_DMASK) { case PCIM_PSTAT_D0: result = PCI_POWERSTATE_D0; break; case PCIM_PSTAT_D1: result = PCI_POWERSTATE_D1; break; case PCIM_PSTAT_D2: result = PCI_POWERSTATE_D2; break; case PCIM_PSTAT_D3: result = PCI_POWERSTATE_D3; break; default: result = PCI_POWERSTATE_UNKNOWN; break; } } else { /* No support, device is always at D0 */ result = PCI_POWERSTATE_D0; } return (result); } /* * Some convenience functions for PCI device drivers. */ static __inline void pci_set_command_bit(device_t dev, device_t child, uint16_t bit) { uint16_t command; command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); command |= bit; PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); } static __inline void pci_clear_command_bit(device_t dev, device_t child, uint16_t bit) { uint16_t command; command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); command &= ~bit; PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); } int pci_enable_busmaster_method(device_t dev, device_t child) { pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); return (0); } int pci_disable_busmaster_method(device_t dev, device_t child) { pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); return (0); } int pci_enable_io_method(device_t dev, device_t child, int space) { uint16_t bit; switch(space) { case SYS_RES_IOPORT: bit = PCIM_CMD_PORTEN; break; case SYS_RES_MEMORY: bit = PCIM_CMD_MEMEN; break; default: return (EINVAL); } pci_set_command_bit(dev, child, bit); return (0); } int pci_disable_io_method(device_t dev, device_t child, int space) { uint16_t bit; switch(space) { case SYS_RES_IOPORT: bit = PCIM_CMD_PORTEN; break; case SYS_RES_MEMORY: bit = PCIM_CMD_MEMEN; break; default: return (EINVAL); } pci_clear_command_bit(dev, child, bit); return (0); } /* * New style pci driver. Parent device is either a pci-host-bridge or a * pci-pci-bridge. Both kinds are represented by instances of pcib. 
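A minimal suspend/resume sketch built on the power-state methods above; the mandated settle delays (10 ms around D3, 200 us around D2) are applied inside pci_set_powerstate() itself:

static void
example_suspend_resume(device_t dev)
{
	/* Drop to D3 on suspend... */
	pci_set_powerstate(dev, PCI_POWERSTATE_D3);
	/* ...and restore full power on resume. */
	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
}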
*/ void pci_print_verbose(struct pci_devinfo *dinfo) { if (bootverbose) { pcicfgregs *cfg = &dinfo->cfg; printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n", cfg->vendor, cfg->device, cfg->revid); printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n", cfg->domain, cfg->bus, cfg->slot, cfg->func); printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n", cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype, cfg->mfdev); printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n", cfg->cmdreg, cfg->statreg, cfg->cachelnsz); printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n", cfg->lattimer, cfg->lattimer * 30, cfg->mingnt, cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250); if (cfg->intpin > 0) printf("\tintpin=%c, irq=%d\n", cfg->intpin +'a' -1, cfg->intline); if (cfg->pp.pp_cap) { uint16_t status; status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2); printf("\tpowerspec %d supports D0%s%s D3 current D%d\n", cfg->pp.pp_cap & PCIM_PCAP_SPEC, cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "", cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "", status & PCIM_PSTAT_DMASK); } if (cfg->msi.msi_location) { int ctrl; ctrl = cfg->msi.msi_ctrl; printf("\tMSI supports %d message%s%s%s\n", cfg->msi.msi_msgnum, (cfg->msi.msi_msgnum == 1) ? "" : "s", (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "", (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":""); } if (cfg->msix.msix_location) { printf("\tMSI-X supports %d message%s ", cfg->msix.msix_msgnum, (cfg->msix.msix_msgnum == 1) ? "" : "s"); if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar) printf("in map 0x%x\n", cfg->msix.msix_table_bar); else printf("in maps 0x%x and 0x%x\n", cfg->msix.msix_table_bar, cfg->msix.msix_pba_bar); } } } static int pci_porten(device_t dev) { return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0; } static int pci_memen(device_t dev) { return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0; } void pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp, int *bar64) { struct pci_devinfo *dinfo; pci_addr_t map, testval; int ln2range; uint16_t cmd; /* * The device ROM BAR is special. It is always a 32-bit * memory BAR. Bit 0 is special and should not be set when * sizing the BAR. */ dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, reg)) { map = pci_read_config(dev, reg, 4); pci_write_config(dev, reg, 0xfffffffe, 4); testval = pci_read_config(dev, reg, 4); pci_write_config(dev, reg, map, 4); *mapp = map; *testvalp = testval; if (bar64 != NULL) *bar64 = 0; return; } map = pci_read_config(dev, reg, 4); ln2range = pci_maprange(map); if (ln2range == 64) map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; /* * Disable decoding via the command register before * determining the BAR's length since we will be placing it in * a weird state. */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); /* * Determine the BAR's length by writing all 1's. The bottom * log_2(size) bits of the BAR will stick as 0 when we read * the value back. * * NB: according to the PCI Local Bus Specification, rev. 3.0: * "Software writes 0FFFFFFFFh to both registers, reads them back, * and combines the result into a 64-bit value." (section 6.2.5.1) * * Writes to both registers must be performed before attempting to * read back the size value. 
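A worked example of the sizing rule described above, assuming a 32-bit memory BAR and an illustrative helper name: after writing all 1s, a read-back of 0xfffff000 has the low log2(size) bits stuck at zero, so the BAR is 1 << 12 = 4 KiB.

static int
example_bar_ln2size(uint32_t testval)
{
	/* Strip the low type/flag bits, then find the first stuck bit. */
	testval &= 0xfffffff0;
	if (testval == 0)
		return (0);
	return (ffs(testval) - 1);	/* 0xfffff000 -> 12 */
}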
*/ testval = 0; pci_write_config(dev, reg, 0xffffffff, 4); if (ln2range == 64) { pci_write_config(dev, reg + 4, 0xffffffff, 4); testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; } testval |= pci_read_config(dev, reg, 4); /* * Restore the original value of the BAR. We may have reprogrammed * the BAR of the low-level console device and when booting verbose, * we need the console device addressable. */ pci_write_config(dev, reg, map, 4); if (ln2range == 64) pci_write_config(dev, reg + 4, map >> 32, 4); pci_write_config(dev, PCIR_COMMAND, cmd, 2); *mapp = map; *testvalp = testval; if (bar64 != NULL) *bar64 = (ln2range == 64); } static void pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base) { struct pci_devinfo *dinfo; int ln2range; /* The device ROM BAR is always a 32-bit memory BAR. */ dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) ln2range = 32; else ln2range = pci_maprange(pm->pm_value); pci_write_config(dev, pm->pm_reg, base, 4); if (ln2range == 64) pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4); pm->pm_value = pci_read_config(dev, pm->pm_reg, 4); if (ln2range == 64) pm->pm_value |= (pci_addr_t)pci_read_config(dev, pm->pm_reg + 4, 4) << 32; } struct pci_map * pci_find_bar(device_t dev, int reg) { struct pci_devinfo *dinfo; struct pci_map *pm; dinfo = device_get_ivars(dev); STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { if (pm->pm_reg == reg) return (pm); } return (NULL); } int pci_bar_enabled(device_t dev, struct pci_map *pm) { struct pci_devinfo *dinfo; uint16_t cmd; dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) && !(pm->pm_value & PCIM_BIOS_ENABLE)) return (0); cmd = pci_read_config(dev, PCIR_COMMAND, 2); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value)) return ((cmd & PCIM_CMD_MEMEN) != 0); else return ((cmd & PCIM_CMD_PORTEN) != 0); } struct pci_map * pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size) { struct pci_devinfo *dinfo; struct pci_map *pm, *prev; dinfo = device_get_ivars(dev); pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO); pm->pm_reg = reg; pm->pm_value = value; pm->pm_size = size; STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) { KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x", reg)); if (STAILQ_NEXT(prev, pm_link) == NULL || STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg) break; } if (prev != NULL) STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link); else STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link); return (pm); } static void pci_restore_bars(device_t dev) { struct pci_devinfo *dinfo; struct pci_map *pm; int ln2range; dinfo = device_get_ivars(dev); STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) ln2range = 32; else ln2range = pci_maprange(pm->pm_value); pci_write_config(dev, pm->pm_reg, pm->pm_value, 4); if (ln2range == 64) pci_write_config(dev, pm->pm_reg + 4, pm->pm_value >> 32, 4); } } /* * Add a resource based on a pci map register. Return 1 if the map * register is a 32bit map register or 2 if it is a 64bit register. */ static int pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, int force, int prefetch) { struct pci_map *pm; pci_addr_t base, map, testval; pci_addr_t start, end, count; int barlen, basezero, flags, maprange, mapsize, type; uint16_t cmd; struct resource *res; /* * The BAR may already exist if the device is a CardBus card * whose CIS is stored in this BAR. 
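A brief sketch showing how the per-device BAR list maintained above can be queried; note that pm_size stores the log2 of the BAR size, matching the mapsize convention used below:

static void
example_show_bar0(device_t dev)
{
	struct pci_map *pm;

	pm = pci_find_bar(dev, PCIR_BAR(0));
	if (pm != NULL && pci_bar_enabled(dev, pm))
		device_printf(dev, "BAR0 raw value %#jx, size 2^%ju bytes\n",
		    (uintmax_t)pm->pm_value, (uintmax_t)pm->pm_size);
}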
pm = pci_find_bar(dev, reg); if (pm != NULL) { maprange = pci_maprange(pm->pm_value); barlen = maprange == 64 ? 2 : 1; return (barlen); } pci_read_bar(dev, reg, &map, &testval, NULL); if (PCI_BAR_MEM(map)) { type = SYS_RES_MEMORY; if (map & PCIM_BAR_MEM_PREFETCH) prefetch = 1; } else type = SYS_RES_IOPORT; mapsize = pci_mapsize(testval); base = pci_mapbase(map); #ifdef __PCI_BAR_ZERO_VALID basezero = 0; #else basezero = base == 0; #endif maprange = pci_maprange(map); barlen = maprange == 64 ? 2 : 1; /* * For I/O registers, if bottom bit is set, and the next bit up * isn't clear, we know we have a BAR that doesn't conform to the * spec, so ignore it. Also, sanity check the size of the data * areas against the type of memory involved. Memory must be at least * 16 bytes in size, while I/O ranges must be at least 4. */ if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0) return (barlen); if ((type == SYS_RES_MEMORY && mapsize < 4) || (type == SYS_RES_IOPORT && mapsize < 2)) return (barlen); /* Save a record of this BAR. */ pm = pci_add_bar(dev, reg, map, mapsize); if (bootverbose) { printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d", reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize); if (type == SYS_RES_IOPORT && !pci_porten(dev)) printf(", port disabled\n"); else if (type == SYS_RES_MEMORY && !pci_memen(dev)) printf(", memory disabled\n"); else printf(", enabled\n"); } /* * If base is 0, then we have problems if this architecture does * not allow that. It is best to ignore such entries for the * moment. These will be allocated later if the driver specifically * requests them. However, some removable buses look better when * all resources are allocated, so allow '0' to be overridden. * * Similarly treat maps whose value is the same as the test value * read back. These maps have had all f's written to them by the * BIOS in an attempt to disable the resources. */ if (!force && (basezero || map == testval)) return (barlen); if ((u_long)base != base) { device_printf(bus, "pci%d:%d:%d:%d bar %#x too many address bits", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), reg); return (barlen); } /* * This code theoretically does the right thing, but has * undesirable side effects in some cases where peripherals * respond oddly to having these bits enabled. Let the user * be able to turn them off (since pci_enable_io_modes is 1 by * default). */ if (pci_enable_io_modes) { /* Turn on resources that have been left off by a lazy BIOS */ if (type == SYS_RES_IOPORT && !pci_porten(dev)) { cmd = pci_read_config(dev, PCIR_COMMAND, 2); cmd |= PCIM_CMD_PORTEN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); } if (type == SYS_RES_MEMORY && !pci_memen(dev)) { cmd = pci_read_config(dev, PCIR_COMMAND, 2); cmd |= PCIM_CMD_MEMEN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); } } else { if (type == SYS_RES_IOPORT && !pci_porten(dev)) return (barlen); if (type == SYS_RES_MEMORY && !pci_memen(dev)) return (barlen); } count = (pci_addr_t)1 << mapsize; flags = RF_ALIGNMENT_LOG2(mapsize); if (prefetch) flags |= RF_PREFETCHABLE; if (basezero || base == pci_mapbase(testval) || pci_clear_bars) { start = 0; /* Let the parent decide. */ end = ~0; } else { start = base; end = base + count - 1; } resource_list_add(rl, type, reg, start, end, count); /* * Try to allocate the resource for this BAR from our parent * so that this resource range is already reserved. The * driver for this device will later inherit this resource in * pci_alloc_resource().
	res = resource_list_reserve(rl, bus, dev, type, &reg, start, end,
	    count, flags);
	if ((pci_do_realloc_bars || pci_has_quirk(pci_get_devid(dev),
	    PCI_QUIRK_REALLOC_BAR)) && res == NULL &&
	    (start != 0 || end != ~0)) {
		/*
		 * If the allocation fails, try to allocate a resource for
		 * this BAR using any available range.  The firmware felt
		 * it was important enough to assign a resource, so don't
		 * disable decoding if we can help it.
		 */
		resource_list_delete(rl, type, reg);
		resource_list_add(rl, type, reg, 0, ~0, count);
		res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0,
		    count, flags);
	}
	if (res == NULL) {
		/*
		 * If the allocation fails, delete the resource list entry
		 * and disable decoding for this device.
		 *
		 * If the driver requests this resource in the future,
		 * pci_reserve_map() will try to allocate a fresh
		 * resource range.
		 */
		resource_list_delete(rl, type, reg);
		pci_disable_io(dev, type);
		if (bootverbose)
			device_printf(bus,
			    "pci%d:%d:%d:%d bar %#x failed to allocate\n",
			    pci_get_domain(dev), pci_get_bus(dev),
			    pci_get_slot(dev), pci_get_function(dev), reg);
	} else {
		start = rman_get_start(res);
		pci_write_bar(dev, pm, start);
	}
	return (barlen);
}

/*
 * For ATA devices we need to decide early what addressing mode to use.
 * Legacy demands that the primary and secondary ATA ports sit on the
 * same addresses that old ISA hardware did.  This dictates that we use
 * those addresses and ignore the BARs if we cannot set PCI native
 * addressing mode.
 */
static void
pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
    uint32_t prefetchmask)
{
	int rid, type, progif;
#if 0
	/* if this device supports PCI native addressing use it */
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	if ((progif & 0x8a) == 0x8a) {
		if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
		    pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
			printf("Trying ATA native PCI addressing mode\n");
			pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
		}
	}
#endif
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	type = SYS_RES_IOPORT;
	if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
		pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
		    prefetchmask & (1 << 0));
		pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
		    prefetchmask & (1 << 1));
	} else {
		rid = PCIR_BAR(0);
		resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
		(void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
		    0x1f7, 8, 0);
		rid = PCIR_BAR(1);
		resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
		(void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
		    0x3f6, 1, 0);
	}
	if (progif & PCIP_STORAGE_IDE_MODESEC) {
		pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
		    prefetchmask & (1 << 2));
		pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
		    prefetchmask & (1 << 3));
	} else {
		rid = PCIR_BAR(2);
		resource_list_add(rl, type, rid, 0x170, 0x177, 8);
		(void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
		    0x177, 8, 0);
		rid = PCIR_BAR(3);
		resource_list_add(rl, type, rid, 0x376, 0x376, 1);
		(void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
		    0x376, 1, 0);
	}
	pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
	    prefetchmask & (1 << 4));
	pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
	    prefetchmask & (1 << 5));
}

static void
pci_assign_interrupt(device_t bus, device_t dev, int force_route)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	char tunable_name[64];
	int irq;

	/* Has to have an intpin to have an interrupt. */
	if (cfg->intpin == 0)
		return;

	/*
	 * Let the user override the IRQ with a tunable.
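	 *
	 * The tunable name is hw.pci<D>.<B>.<S>.INT<pin>.irq, exactly as
	 * built by the snprintf() below.  A hypothetical loader.conf(5)
	 * entry such as
	 *
	 *	hw.pci0.0.31.INTA.irq="9"
	 *
	 * (domain 0, bus 0, slot 31, pin A; values are illustrative)
	 * would force that function's INTx onto IRQ 9.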
*/ irq = PCI_INVALID_IRQ; snprintf(tunable_name, sizeof(tunable_name), "hw.pci%d.%d.%d.INT%c.irq", cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1); if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0)) irq = PCI_INVALID_IRQ; /* * If we didn't get an IRQ via the tunable, then we either use the * IRQ value in the intline register or we ask the bus to route an * interrupt for us. If force_route is true, then we only use the * value in the intline register if the bus was unable to assign an * IRQ. */ if (!PCI_INTERRUPT_VALID(irq)) { if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route) irq = PCI_ASSIGN_INTERRUPT(bus, dev); if (!PCI_INTERRUPT_VALID(irq)) irq = cfg->intline; } /* If after all that we don't have an IRQ, just bail. */ if (!PCI_INTERRUPT_VALID(irq)) return; /* Update the config register if it changed. */ if (irq != cfg->intline) { cfg->intline = irq; pci_write_config(dev, PCIR_INTLINE, irq, 1); } /* Add this IRQ as rid 0 interrupt resource. */ resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1); } /* Perform early OHCI takeover from SMM. */ static void ohci_early_takeover(device_t self) { struct resource *res; uint32_t ctl; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; ctl = bus_read_4(res, OHCI_CONTROL); if (ctl & OHCI_IR) { if (bootverbose) printf("ohci early: " "SMM active, request owner change\n"); bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR); for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) { DELAY(1000); ctl = bus_read_4(res, OHCI_CONTROL); } if (ctl & OHCI_IR) { if (bootverbose) printf("ohci early: " "SMM does not respond, resetting\n"); bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET); } /* Disable interrupts */ bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } /* Perform early UHCI takeover from SMM. */ static void uhci_early_takeover(device_t self) { struct resource *res; int rid; /* * Set the PIRQD enable bit and switch off all the others. We don't * want legacy support to interfere with us XXX Does this also mean * that the BIOS won't touch the keyboard anymore if it is connected * to the ports of the root hub? */ pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2); /* Disable interrupts */ rid = PCI_UHCI_BASE_REG; res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (res != NULL) { bus_write_2(res, UHCI_INTR, 0); bus_release_resource(self, SYS_RES_IOPORT, rid, res); } } /* Perform early EHCI takeover from SMM. */ static void ehci_early_takeover(device_t self) { struct resource *res; uint32_t cparams; uint32_t eec; uint8_t eecp; uint8_t bios_sem; uint8_t offs; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; cparams = bus_read_4(res, EHCI_HCCPARAMS); /* Synchronise with the BIOS if it owns the controller. 
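	 * The handshake below is the standard EHCI legacy-support dance:
	 * walk the extended-capability list looking for EHCI_EC_LEGSUP,
	 * set the OS-owned semaphore, then poll the BIOS-owned semaphore
	 * for up to 100ms until the firmware releases the controller.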
 */
	for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
	    eecp = EHCI_EECP_NEXT(eec)) {
		eec = pci_read_config(self, eecp, 4);
		if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
			continue;
		}
		bios_sem = pci_read_config(self, eecp +
		    EHCI_LEGSUP_BIOS_SEM, 1);
		if (bios_sem == 0) {
			continue;
		}
		if (bootverbose)
			printf("ehci early: "
			    "SMM active, request owner change\n");
		pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
		for (i = 0; (i < 100) && (bios_sem != 0); i++) {
			DELAY(1000);
			bios_sem = pci_read_config(self, eecp +
			    EHCI_LEGSUP_BIOS_SEM, 1);
		}
		if (bios_sem != 0) {
			if (bootverbose)
				printf("ehci early: "
				    "SMM does not respond\n");
		}
		/* Disable interrupts */
		offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
		bus_write_4(res, offs + EHCI_USBINTR, 0);
	}
	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
}

/* Perform early XHCI takeover from SMM. */
static void
xhci_early_takeover(device_t self)
{
	struct resource *res;
	uint32_t cparams;
	uint32_t eec;
	uint8_t eecp;
	uint8_t bios_sem;
	uint8_t offs;
	int rid;
	int i;

	rid = PCIR_BAR(0);
	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (res == NULL)
		return;

	cparams = bus_read_4(res, XHCI_HCSPARAMS0);

	eec = -1;

	/* Synchronise with the BIOS if it owns the controller. */
	for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 &&
	    XHCI_XECP_NEXT(eec); eecp += XHCI_XECP_NEXT(eec) << 2) {
		eec = bus_read_4(res, eecp);
		if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
			continue;
		bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
		if (bios_sem == 0)
			continue;
		if (bootverbose)
			printf("xhci early: "
			    "SMM active, request owner change\n");
		bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);

		/* wait a maximum of 5 seconds */
		for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
			DELAY(1000);
			bios_sem = bus_read_1(res, eecp +
			    XHCI_XECP_BIOS_SEM);
		}
		if (bios_sem != 0) {
			if (bootverbose)
				printf("xhci early: "
				    "SMM does not respond\n");
		}
		/* Disable interrupts */
		offs = bus_read_1(res, XHCI_CAPLENGTH);
		bus_write_4(res, offs + XHCI_USBCMD, 0);
		bus_read_4(res, offs + XHCI_USBSTS);
	}
	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
}

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
static void
pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
    struct resource_list *rl)
{
	struct resource *res;
	char *cp;
	rman_res_t start, end, count;
	int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus;

	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_BRIDGE:
		sec_reg = PCIR_SECBUS_1;
		sub_reg = PCIR_SUBBUS_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		sec_reg = PCIR_SECBUS_2;
		sub_reg = PCIR_SUBBUS_2;
		break;
	default:
		return;
	}

	/*
	 * If the existing bus range is valid, attempt to reserve it
	 * from our parent.  If this fails for any reason, clear the
	 * secbus and subbus registers.
	 *
	 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus?
	 * This would at least preserve the existing sec_bus if it is
	 * valid.
	 */
	sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1);
	sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1);

	/* Quirk handling. */
	switch (pci_get_devid(dev)) {
	case 0x12258086:		/* Intel 82454KX/GX (Orion) */
		sup_bus = pci_read_config(dev, 0x41, 1);
		if (sup_bus != 0xff) {
			sec_bus = sup_bus + 1;
			sub_bus = sup_bus + 1;
			PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1);
			PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
		}
		break;

	case 0x00dd10de:
		/*
		 * Compaq R3000 BIOS sets wrong subordinate bus number.
*/ if ((cp = kern_getenv("smbios.planar.maker")) == NULL) break; if (strncmp(cp, "Compal", 6) != 0) { freeenv(cp); break; } freeenv(cp); if ((cp = kern_getenv("smbios.planar.product")) == NULL) break; if (strncmp(cp, "08A0", 4) != 0) { freeenv(cp); break; } freeenv(cp); if (sub_bus < 0xa) { sub_bus = 0xa; PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1); } break; } if (bootverbose) printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus); if (sec_bus > 0 && sub_bus >= sec_bus) { start = sec_bus; end = sub_bus; count = end - start + 1; resource_list_add(rl, PCI_RES_BUS, 0, 0, ~0, count); /* * If requested, clear secondary bus registers in * bridge devices to force a complete renumbering * rather than reserving the existing range. However, * preserve the existing size. */ if (pci_clear_buses) goto clear; rid = 0; res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid, start, end, count, 0); if (res != NULL) return; if (bootverbose) device_printf(bus, "pci%d:%d:%d:%d secbus failed to allocate\n", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); } clear: PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1); PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1); } static struct resource * pci_alloc_secbus(device_t dev, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pci_devinfo *dinfo; pcicfgregs *cfg; struct resource_list *rl; struct resource *res; int sec_reg, sub_reg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; rl = &dinfo->resources; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; sub_reg = PCIR_SUBBUS_2; break; default: return (NULL); } if (*rid != 0) return (NULL); if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL) resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count); if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) { res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { resource_list_delete(rl, PCI_RES_BUS, *rid); device_printf(child, "allocating %ju bus%s failed\n", count, count == 1 ? "" : "es"); return (NULL); } if (bootverbose) device_printf(child, "Lazy allocation of %ju bus%s at %ju\n", count, count == 1 ? 
"" : "es", rman_get_start(res)); PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1); PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1); } return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start, end, count, flags)); } #endif static int pci_ea_bei_to_rid(device_t dev, int bei) { #ifdef PCI_IOV struct pci_devinfo *dinfo; int iov_pos; struct pcicfg_iov *iov; dinfo = device_get_ivars(dev); iov = dinfo->cfg.iov; if (iov != NULL) iov_pos = iov->iov_pos; else iov_pos = 0; #endif /* Check if matches BAR */ if ((bei >= PCIM_EA_BEI_BAR_0) && (bei <= PCIM_EA_BEI_BAR_5)) return (PCIR_BAR(bei)); /* Check ROM */ if (bei == PCIM_EA_BEI_ROM) return (PCIR_BIOS); #ifdef PCI_IOV /* Check if matches VF_BAR */ if ((iov != NULL) && (bei >= PCIM_EA_BEI_VF_BAR_0) && (bei <= PCIM_EA_BEI_VF_BAR_5)) return (PCIR_SRIOV_BAR(bei - PCIM_EA_BEI_VF_BAR_0) + iov_pos); #endif return (-1); } int pci_ea_is_enabled(device_t dev, int rid) { struct pci_ea_entry *ea; struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { if (pci_ea_bei_to_rid(dev, ea->eae_bei) == rid) return ((ea->eae_flags & PCIM_EA_ENABLE) > 0); } return (0); } void pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov) { struct pci_ea_entry *ea; struct pci_devinfo *dinfo; pci_addr_t start, end, count; struct resource_list *rl; int type, flags, rid; struct resource *res; uint32_t tmp; #ifdef PCI_IOV struct pcicfg_iov *iov; #endif dinfo = device_get_ivars(dev); rl = &dinfo->resources; flags = 0; #ifdef PCI_IOV iov = dinfo->cfg.iov; #endif if (dinfo->cfg.ea.ea_location == 0) return; STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { /* * TODO: Ignore EA-BAR if is not enabled. * Currently the EA implementation supports * only situation, where EA structure contains * predefined entries. In case they are not enabled * leave them unallocated and proceed with * a legacy-BAR mechanism. */ if ((ea->eae_flags & PCIM_EA_ENABLE) == 0) continue; switch ((ea->eae_flags & PCIM_EA_PP) >> PCIM_EA_PP_OFFSET) { case PCIM_EA_P_MEM_PREFETCH: case PCIM_EA_P_VF_MEM_PREFETCH: flags = RF_PREFETCHABLE; /* FALLTHROUGH */ case PCIM_EA_P_VF_MEM: case PCIM_EA_P_MEM: type = SYS_RES_MEMORY; break; case PCIM_EA_P_IO: type = SYS_RES_IOPORT; break; default: continue; } if (alloc_iov != 0) { #ifdef PCI_IOV /* Allocating IOV, confirm BEI matches */ if ((ea->eae_bei < PCIM_EA_BEI_VF_BAR_0) || (ea->eae_bei > PCIM_EA_BEI_VF_BAR_5)) continue; #else continue; #endif } else { /* Allocating BAR, confirm BEI matches */ if (((ea->eae_bei < PCIM_EA_BEI_BAR_0) || (ea->eae_bei > PCIM_EA_BEI_BAR_5)) && (ea->eae_bei != PCIM_EA_BEI_ROM)) continue; } rid = pci_ea_bei_to_rid(dev, ea->eae_bei); if (rid < 0) continue; /* Skip resources already allocated by EA */ if ((resource_list_find(rl, SYS_RES_MEMORY, rid) != NULL) || (resource_list_find(rl, SYS_RES_IOPORT, rid) != NULL)) continue; start = ea->eae_base; count = ea->eae_max_offset + 1; #ifdef PCI_IOV if (iov != NULL) count = count * iov->iov_num_vfs; #endif end = start + count - 1; if (count == 0) continue; resource_list_add(rl, type, rid, start, end, count); res = resource_list_reserve(rl, bus, dev, type, &rid, start, end, count, flags); if (res == NULL) { resource_list_delete(rl, type, rid); /* * Failed to allocate using EA, disable entry. 
			 * Another allocation attempt will be made later,
			 * this time using the legacy BAR registers.
			 */
			tmp = pci_read_config(dev, ea->eae_cfg_offset, 4);
			tmp &= ~PCIM_EA_ENABLE;
			pci_write_config(dev, ea->eae_cfg_offset, tmp, 4);

			/*
			 * Disabling the entry might fail if it is
			 * hardwired.  Read the flags again to match the
			 * current status.
			 */
			ea->eae_flags = pci_read_config(dev,
			    ea->eae_cfg_offset, 4);
			continue;
		}

		/* As per specification, fill BAR with zeros */
		pci_write_config(dev, rid, 0, 4);
	}
}

void
pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;
	struct resource_list *rl;
	const struct pci_quirk *q;
	uint32_t devid;
	int i;

	dinfo = device_get_ivars(dev);
	cfg = &dinfo->cfg;
	rl = &dinfo->resources;
	devid = (cfg->device << 16) | cfg->vendor;

	/* Allocate resources using Enhanced Allocation */
	pci_add_resources_ea(bus, dev, 0);

	/* ATA devices need special map treatment */
	if ((pci_get_class(dev) == PCIC_STORAGE) &&
	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
	    ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
	     (!pci_read_config(dev, PCIR_BAR(0), 4) &&
	      !pci_read_config(dev, PCIR_BAR(2), 4))) )
		pci_ata_maps(bus, dev, rl, force, prefetchmask);
	else
		for (i = 0; i < cfg->nummaps;) {
			/* Skip resources already managed by EA */
			if ((resource_list_find(rl, SYS_RES_MEMORY,
			    PCIR_BAR(i)) != NULL) ||
			    (resource_list_find(rl, SYS_RES_IOPORT,
			    PCIR_BAR(i)) != NULL) ||
			    pci_ea_is_enabled(dev, PCIR_BAR(i))) {
				i++;
				continue;
			}

			/*
			 * Skip quirked resources.
			 */
			for (q = &pci_quirks[0]; q->devid != 0; q++)
				if (q->devid == devid &&
				    q->type == PCI_QUIRK_UNMAP_REG &&
				    q->arg1 == PCIR_BAR(i))
					break;
			if (q->devid != 0) {
				i++;
				continue;
			}
			i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
			    prefetchmask & (1 << i));
		}

	/*
	 * Add additional, quirked resources.
	 */
	for (q = &pci_quirks[0]; q->devid != 0; q++)
		if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
			pci_add_map(bus, dev, q->arg1, rl, force, 0);

	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
#ifdef __PCI_REROUTE_INTERRUPT
		/*
		 * Try to re-route interrupts.  Sometimes the BIOS or
		 * firmware may leave bogus values in these registers.
		 * If the re-route fails, then just stick with what we
		 * have.
		 */
		pci_assign_interrupt(bus, dev, 1);
#else
		pci_assign_interrupt(bus, dev, 0);
#endif
	}

	if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
	    pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
		if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
			xhci_early_takeover(dev);
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
			ehci_early_takeover(dev);
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
			ohci_early_takeover(dev);
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
			uhci_early_takeover(dev);
	}

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	/*
	 * Reserve resources for secondary bus ranges behind bridge
	 * devices.
	 */
	pci_reserve_secbus(bus, dev, cfg, rl);
#endif
}

static struct pci_devinfo *
pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
    int slot, int func)
{
	struct pci_devinfo *dinfo;

	dinfo = pci_read_device(pcib, dev, domain, busno, slot, func);
	if (dinfo != NULL)
		pci_add_child(dev, dinfo);

	return (dinfo);
}

void
pci_add_children(device_t dev, int domain, int busno)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
	device_t pcib = device_get_parent(dev);
	struct pci_devinfo *dinfo;
	int maxslots;
	int s, f, pcifunchigh;
	uint8_t hdrtype;
	int first_func;

	/*
	 * Try to detect a device at slot 0, function 0.  If it exists,
	 * try to enable ARI.
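	 * (ARI, Alternative Routing-ID Interpretation, reuses the slot
	 * bits of the routing ID so that a single device can expose up
	 * to 256 functions instead of 8.)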
We must enable ARI before detecting the rest of the * functions on this bus as ARI changes the set of slots and functions * that are legal on this bus. */ dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0); if (dinfo != NULL && pci_enable_ari) PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev); /* * Start looking for new devices on slot 0 at function 1 because we * just identified the device at slot 0, function 0. */ first_func = 1; maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++, first_func = 0) { pcifunchigh = 0; f = 0; DELAY(1); hdrtype = REG(PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCIB_MAXFUNCS(pcib); for (f = first_func; f <= pcifunchigh; f++) pci_identify_function(pcib, dev, domain, busno, s, f); } #undef REG } int pci_rescan_method(device_t dev) { #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) device_t pcib = device_get_parent(dev); device_t child, *devlist, *unchanged; int devcount, error, i, j, maxslots, oldcount; int busno, domain, s, f, pcifunchigh; uint8_t hdrtype; /* No need to check for ARI on a rescan. */ error = device_get_children(dev, &devlist, &devcount); if (error) return (error); if (devcount != 0) { unchanged = malloc(devcount * sizeof(device_t), M_TEMP, M_NOWAIT | M_ZERO); if (unchanged == NULL) { free(devlist, M_TEMP); return (ENOMEM); } } else unchanged = NULL; domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++) { /* If function 0 is not present, skip to the next slot. */ f = 0; if (REG(PCIR_VENDOR, 2) == 0xffff) continue; pcifunchigh = 0; hdrtype = REG(PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCIB_MAXFUNCS(pcib); for (f = 0; f <= pcifunchigh; f++) { if (REG(PCIR_VENDOR, 2) == 0xffff) continue; /* * Found a valid function. Check if a * device_t for this device already exists. */ for (i = 0; i < devcount; i++) { child = devlist[i]; if (child == NULL) continue; if (pci_get_slot(child) == s && pci_get_function(child) == f) { unchanged[i] = child; goto next_func; } } pci_identify_function(pcib, dev, domain, busno, s, f); next_func:; } } /* Remove devices that are no longer present. */ for (i = 0; i < devcount; i++) { if (unchanged[i] != NULL) continue; device_delete_child(dev, devlist[i]); } free(devlist, M_TEMP); oldcount = devcount; /* Try to attach the devices just added. */ error = device_get_children(dev, &devlist, &devcount); if (error) { free(unchanged, M_TEMP); return (error); } for (i = 0; i < devcount; i++) { for (j = 0; j < oldcount; j++) { if (devlist[i] == unchanged[j]) goto next_device; } device_probe_and_attach(devlist[i]); next_device:; } free(unchanged, M_TEMP); free(devlist, M_TEMP); return (0); #undef REG } #ifdef PCI_IOV device_t pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did) { struct pci_devinfo *vf_dinfo; device_t pcib; int busno, slot, func; pcib = device_get_parent(bus); PCIB_DECODE_RID(pcib, rid, &busno, &slot, &func); vf_dinfo = pci_fill_devinfo(pcib, bus, pci_get_domain(pcib), busno, slot, func, vid, did); vf_dinfo->cfg.flags |= PCICFG_VF; pci_add_child(bus, vf_dinfo); return (vf_dinfo->cfg.dev); } device_t pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did) { return (pci_add_iov_child(bus, pf, rid, vid, did)); } #endif /* * For PCIe device set Max_Payload_Size to match PCIe root's. 
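 * The three-bit MPS field encodes 128 << n bytes (0 = 128, 5 = 4096).
 * The capability field lives in bits 2:0 of DEVICE_CAP while the
 * control field lives in bits 7:5 of DEVICE_CTL, which is why the code
 * below shifts the capability value left by 5 before comparing it with
 * the root's control value.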
*/ static void pcie_setup_mps(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); device_t root; uint16_t rmps, mmps, mps; if (dinfo->cfg.pcie.pcie_location == 0) return; root = pci_find_pcie_root_port(dev); if (root == NULL) return; /* Check whether the MPS is already configured. */ rmps = pcie_read_config(root, PCIER_DEVICE_CTL, 2) & PCIEM_CTL_MAX_PAYLOAD; mps = pcie_read_config(dev, PCIER_DEVICE_CTL, 2) & PCIEM_CTL_MAX_PAYLOAD; if (mps == rmps) return; /* Check whether the device is capable of the root's MPS. */ mmps = (pcie_read_config(dev, PCIER_DEVICE_CAP, 2) & PCIEM_CAP_MAX_PAYLOAD) << 5; if (rmps > mmps) { /* * The device is unable to handle root's MPS. Limit root. * XXX: We should traverse through all the tree, applying * it to all the devices. */ pcie_adjust_config(root, PCIER_DEVICE_CTL, PCIEM_CTL_MAX_PAYLOAD, mmps, 2); } else { pcie_adjust_config(dev, PCIER_DEVICE_CTL, PCIEM_CTL_MAX_PAYLOAD, rmps, 2); } } static void pci_add_child_clear_aer(device_t dev, struct pci_devinfo *dinfo) { int aer; uint32_t r; uint16_t r2; if (dinfo->cfg.pcie.pcie_location != 0 && dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) { r2 = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_ROOT_CTL, 2); r2 &= ~(PCIEM_ROOT_CTL_SERR_CORR | PCIEM_ROOT_CTL_SERR_NONFATAL | PCIEM_ROOT_CTL_SERR_FATAL); pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_ROOT_CTL, r2, 2); } if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) { r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4); if (r != 0 && bootverbose) { pci_printf(&dinfo->cfg, "clearing AER UC 0x%08x -> 0x%08x\n", r, pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4)); } r = pci_read_config(dev, aer + PCIR_AER_UC_MASK, 4); r &= ~(PCIM_AER_UC_TRAINING_ERROR | PCIM_AER_UC_DL_PROTOCOL_ERROR | PCIM_AER_UC_SURPRISE_LINK_DOWN | PCIM_AER_UC_POISONED_TLP | PCIM_AER_UC_FC_PROTOCOL_ERROR | PCIM_AER_UC_COMPLETION_TIMEOUT | PCIM_AER_UC_COMPLETER_ABORT | PCIM_AER_UC_UNEXPECTED_COMPLETION | PCIM_AER_UC_RECEIVER_OVERFLOW | PCIM_AER_UC_MALFORMED_TLP | PCIM_AER_UC_ECRC_ERROR | PCIM_AER_UC_UNSUPPORTED_REQUEST | PCIM_AER_UC_ACS_VIOLATION | PCIM_AER_UC_INTERNAL_ERROR | PCIM_AER_UC_MC_BLOCKED_TLP | PCIM_AER_UC_ATOMIC_EGRESS_BLK | PCIM_AER_UC_TLP_PREFIX_BLOCKED); pci_write_config(dev, aer + PCIR_AER_UC_MASK, r, 4); r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4); if (r != 0 && bootverbose) { pci_printf(&dinfo->cfg, "clearing AER COR 0x%08x -> 0x%08x\n", r, pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4)); } r = pci_read_config(dev, aer + PCIR_AER_COR_MASK, 4); r &= ~(PCIM_AER_COR_RECEIVER_ERROR | PCIM_AER_COR_BAD_TLP | PCIM_AER_COR_BAD_DLLP | PCIM_AER_COR_REPLAY_ROLLOVER | PCIM_AER_COR_REPLAY_TIMEOUT | PCIM_AER_COR_ADVISORY_NF_ERROR | PCIM_AER_COR_INTERNAL_ERROR | PCIM_AER_COR_HEADER_LOG_OVFLOW); pci_write_config(dev, aer + PCIR_AER_COR_MASK, r, 4); r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, 2); r |= PCIEM_CTL_COR_ENABLE | PCIEM_CTL_NFER_ENABLE | PCIEM_CTL_FER_ENABLE | PCIEM_CTL_URR_ENABLE; pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, r, 2); } } void pci_add_child(device_t bus, struct pci_devinfo *dinfo) { device_t dev; dinfo->cfg.dev = dev = device_add_child(bus, NULL, -1); device_set_ivars(dev, dinfo); resource_list_init(&dinfo->resources); pci_cfg_save(dev, dinfo, 0); pci_cfg_restore(dev, dinfo); pci_print_verbose(dinfo); pci_add_resources(bus, dev, 0, 0); 
pcie_setup_mps(dev); pci_child_added(dinfo->cfg.dev); if (pci_clear_aer_on_attach) pci_add_child_clear_aer(dev, dinfo); EVENTHANDLER_INVOKE(pci_add_device, dinfo->cfg.dev); } void pci_child_added_method(device_t dev, device_t child) { } static int pci_probe(device_t dev) { device_set_desc(dev, "PCI bus"); /* Allow other subclasses to override this driver. */ return (BUS_PROBE_GENERIC); } int pci_attach_common(device_t dev) { struct pci_softc *sc; int busno, domain; #ifdef PCI_RES_BUS int rid; #endif sc = device_get_softc(dev); domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); #ifdef PCI_RES_BUS rid = 0; sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno, 1, 0); if (sc->sc_bus == NULL) { device_printf(dev, "failed to allocate bus number\n"); return (ENXIO); } #endif if (bootverbose) device_printf(dev, "domain=%d, physical bus=%d\n", domain, busno); sc->sc_dma_tag = bus_get_dma_tag(dev); return (0); } int pci_attach(device_t dev) { int busno, domain, error; error = pci_attach_common(dev); if (error) return (error); /* * Since there can be multiple independently numbered PCI * buses on systems with multiple PCI domains, we can't use * the unit number to decide which bus we are probing. We ask * the parent pcib what our domain and bus numbers are. */ domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); pci_add_children(dev, domain, busno); return (bus_generic_attach(dev)); } int pci_detach(device_t dev) { #ifdef PCI_RES_BUS struct pci_softc *sc; #endif int error; error = bus_generic_detach(dev); if (error) return (error); #ifdef PCI_RES_BUS sc = device_get_softc(dev); error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus); if (error) return (error); #endif return (device_delete_children(dev)); } static void pci_hint_device_unit(device_t dev, device_t child, const char *name, int *unitp) { int line, unit; const char *at; char me1[24], me2[32]; uint8_t b, s, f; uint32_t d; d = pci_get_domain(child); b = pci_get_bus(child); s = pci_get_slot(child); f = pci_get_function(child); snprintf(me1, sizeof(me1), "pci%u:%u:%u", b, s, f); snprintf(me2, sizeof(me2), "pci%u:%u:%u:%u", d, b, s, f); line = 0; while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) { resource_string_value(name, unit, "at", &at); if (strcmp(at, me1) != 0 && strcmp(at, me2) != 0) continue; /* No match, try next candidate */ *unitp = unit; return; } } static void pci_set_power_child(device_t dev, device_t child, int state) { device_t pcib; int dstate; /* * Set the device to the given state. If the firmware suggests * a different power state, use it instead. If power management * is not present, the firmware is responsible for managing * device power. Skip children who aren't attached since they * are handled separately. */ pcib = device_get_parent(dev); dstate = state; if (device_is_attached(child) && PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0) pci_set_powerstate(child, dstate); } int pci_suspend_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list_entry *rle; int error; dinfo = device_get_ivars(child); /* * Save the PCI configuration space for the child and set the * device in the appropriate power state for this sleep state. */ pci_cfg_save(child, dinfo, 0); /* Suspend devices before potentially powering them down. 
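	 * The ordering matters: the child's suspend method must run while
	 * the device is still in D0, since once it drops to D3hot its
	 * memory and I/O BARs no longer decode accesses.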
*/ error = bus_generic_suspend_child(dev, child); if (error) return (error); if (pci_do_power_suspend) { /* * Make sure this device's interrupt handler is not invoked * in the case the device uses a shared interrupt that can * be raised by some other device. * This is applicable only to regular (legacy) PCI interrupts * as MSI/MSI-X interrupts are never shared. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) (void)bus_suspend_intr(child, rle->res); pci_set_power_child(dev, child, PCI_POWERSTATE_D3); } return (0); } int pci_resume_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list_entry *rle; if (pci_do_power_resume) pci_set_power_child(dev, child, PCI_POWERSTATE_D0); dinfo = device_get_ivars(child); pci_cfg_restore(child, dinfo); if (!device_is_attached(child)) pci_cfg_save(child, dinfo, 1); bus_generic_resume_child(dev, child); /* * Allow interrupts only after fully resuming the driver and hardware. */ if (pci_do_power_suspend) { /* See pci_suspend_child for details. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) (void)bus_resume_intr(child, rle->res); } return (0); } int pci_resume(device_t dev) { device_t child, *devlist; int error, i, numdevs; if ((error = device_get_children(dev, &devlist, &numdevs)) != 0) return (error); /* * Resume critical devices first, then everything else later. */ for (i = 0; i < numdevs; i++) { child = devlist[i]; switch (pci_get_class(child)) { case PCIC_DISPLAY: case PCIC_MEMORY: case PCIC_BRIDGE: case PCIC_BASEPERIPH: BUS_RESUME_CHILD(dev, child); break; } } for (i = 0; i < numdevs; i++) { child = devlist[i]; switch (pci_get_class(child)) { case PCIC_DISPLAY: case PCIC_MEMORY: case PCIC_BRIDGE: case PCIC_BASEPERIPH: break; default: BUS_RESUME_CHILD(dev, child); } } free(devlist, M_TEMP); return (0); } static void pci_load_vendor_data(void) { caddr_t data; void *ptr; size_t sz; data = preload_search_by_type("pci_vendor_data"); if (data != NULL) { ptr = preload_fetch_addr(data); sz = preload_fetch_size(data); if (ptr != NULL && sz != 0) { pci_vendordata = ptr; pci_vendordata_size = sz; /* terminate the database */ pci_vendordata[pci_vendordata_size] = '\n'; } } } void pci_driver_added(device_t dev, driver_t *driver) { int numdevs; device_t *devlist; device_t child; struct pci_devinfo *dinfo; int i; if (bootverbose) device_printf(dev, "driver added\n"); DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs) != 0) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) != DS_NOTPRESENT) continue; dinfo = device_get_ivars(child); pci_print_verbose(dinfo); if (bootverbose) pci_printf(&dinfo->cfg, "reprobing on driver added\n"); pci_cfg_restore(child, dinfo); if (device_probe_and_attach(child) != 0) pci_child_detached(dev, child); } free(devlist, M_TEMP); } int pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { struct pci_devinfo *dinfo; struct msix_table_entry *mte; struct msix_vector *mv; uint64_t addr; uint32_t data; void *cookie; int error, rid; error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr, arg, &cookie); if (error) return (error); /* If this is not a direct child, just bail out. 
*/ if (device_get_parent(child) != dev) { *cookiep = cookie; return(0); } rid = rman_get_rid(irq); if (rid == 0) { /* Make sure that INTx is enabled */ pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); } else { /* * Check to see if the interrupt is MSI or MSI-X. * Ask our parent to map the MSI and give * us the address and data register values. * If we fail for some reason, teardown the * interrupt handler. */ dinfo = device_get_ivars(child); if (dinfo->cfg.msi.msi_alloc > 0) { if (dinfo->cfg.msi.msi_addr == 0) { KASSERT(dinfo->cfg.msi.msi_handlers == 0, ("MSI has handlers, but vectors not mapped")); error = PCIB_MAP_MSI(device_get_parent(dev), child, rman_get_start(irq), &addr, &data); if (error) goto bad; dinfo->cfg.msi.msi_addr = addr; dinfo->cfg.msi.msi_data = data; } if (dinfo->cfg.msi.msi_handlers == 0) pci_enable_msi(child, dinfo->cfg.msi.msi_addr, dinfo->cfg.msi.msi_data); dinfo->cfg.msi.msi_handlers++; } else { KASSERT(dinfo->cfg.msix.msix_alloc > 0, ("No MSI or MSI-X interrupts allocated")); KASSERT(rid <= dinfo->cfg.msix.msix_table_len, ("MSI-X index too high")); mte = &dinfo->cfg.msix.msix_table[rid - 1]; KASSERT(mte->mte_vector != 0, ("no message vector")); mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1]; KASSERT(mv->mv_irq == rman_get_start(irq), ("IRQ mismatch")); if (mv->mv_address == 0) { KASSERT(mte->mte_handlers == 0, ("MSI-X table entry has handlers, but vector not mapped")); error = PCIB_MAP_MSI(device_get_parent(dev), child, rman_get_start(irq), &addr, &data); if (error) goto bad; mv->mv_address = addr; mv->mv_data = data; } /* * The MSIX table entry must be made valid by * incrementing the mte_handlers before * calling pci_enable_msix() and * pci_resume_msix(). Else the MSIX rewrite * table quirk will not work as expected. */ mte->mte_handlers++; if (mte->mte_handlers == 1) { pci_enable_msix(child, rid - 1, mv->mv_address, mv->mv_data); pci_unmask_msix(child, rid - 1); } } /* * Make sure that INTx is disabled if we are using MSI/MSI-X, * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG, * in which case we "enable" INTx so MSI/MSI-X actually works. */ if (!pci_has_quirk(pci_get_devid(child), PCI_QUIRK_MSI_INTX_BUG)) pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); else pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); bad: if (error) { (void)bus_generic_teardown_intr(dev, child, irq, cookie); return (error); } } *cookiep = cookie; return (0); } int pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct msix_table_entry *mte; struct resource_list_entry *rle; struct pci_devinfo *dinfo; int error, rid; if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE)) return (EINVAL); /* If this isn't a direct child, just bail out */ if (device_get_parent(child) != dev) return(bus_generic_teardown_intr(dev, child, irq, cookie)); rid = rman_get_rid(irq); if (rid == 0) { /* Mask INTx */ pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); } else { /* * Check to see if the interrupt is MSI or MSI-X. If so, * decrement the appropriate handlers count and mask the * MSI-X message, or disable MSI messages if the count * drops to 0. 
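		 * Note that rid 0 is always the legacy INTx resource;
		 * MSI and MSI-X vectors are numbered from rid 1, which
		 * is why the table lookups below use rid - 1.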
*/ dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid); if (rle->res != irq) return (EINVAL); if (dinfo->cfg.msi.msi_alloc > 0) { KASSERT(rid <= dinfo->cfg.msi.msi_alloc, ("MSI-X index too high")); if (dinfo->cfg.msi.msi_handlers == 0) return (EINVAL); dinfo->cfg.msi.msi_handlers--; if (dinfo->cfg.msi.msi_handlers == 0) pci_disable_msi(child); } else { KASSERT(dinfo->cfg.msix.msix_alloc > 0, ("No MSI or MSI-X interrupts allocated")); KASSERT(rid <= dinfo->cfg.msix.msix_table_len, ("MSI-X index too high")); mte = &dinfo->cfg.msix.msix_table[rid - 1]; if (mte->mte_handlers == 0) return (EINVAL); mte->mte_handlers--; if (mte->mte_handlers == 0) pci_mask_msix(child, rid - 1); } } error = bus_generic_teardown_intr(dev, child, irq, cookie); if (rid > 0) KASSERT(error == 0, ("%s: generic teardown failed for MSI/MSI-X", __func__)); return (error); } int pci_print_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); if (device_get_flags(dev)) retval += printf(" flags %#x", device_get_flags(dev)); retval += printf(" at device %d.%d", pci_get_slot(child), pci_get_function(child)); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static const struct { int class; int subclass; int report; /* 0 = bootverbose, 1 = always */ const char *desc; } pci_nomatch_tab[] = { {PCIC_OLD, -1, 1, "old"}, {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"}, {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"}, {PCIC_STORAGE, -1, 1, "mass storage"}, {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"}, {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"}, {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"}, {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"}, {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"}, {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"}, {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"}, {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"}, {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"}, {PCIC_NETWORK, -1, 1, "network"}, {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"}, {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"}, {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"}, {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"}, {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"}, {PCIC_DISPLAY, -1, 1, "display"}, {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"}, {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"}, {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"}, {PCIC_MULTIMEDIA, -1, 1, "multimedia"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"}, {PCIC_MEMORY, -1, 1, "memory"}, {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"}, {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"}, {PCIC_BRIDGE, -1, 1, "bridge"}, {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"}, {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"}, {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"}, {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"}, {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"}, {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"}, {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"}, {PCIC_BRIDGE, 
PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"}, {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"}, {PCIC_SIMPLECOMM, -1, 1, "simple comms"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */ {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"}, {PCIC_BASEPERIPH, -1, 0, "base peripheral"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"}, {PCIC_INPUTDEV, -1, 1, "input device"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"}, {PCIC_DOCKING, -1, 1, "docking station"}, {PCIC_PROCESSOR, -1, 1, "processor"}, {PCIC_SERIALBUS, -1, 1, "serial bus"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"}, {PCIC_WIRELESS, -1, 1, "wireless controller"}, {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"}, {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"}, {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"}, {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"}, {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"}, {PCIC_SATCOM, -1, 1, "satellite communication"}, {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"}, {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"}, {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"}, {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"}, {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"}, {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"}, {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"}, {PCIC_DASP, -1, 0, "dasp"}, {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"}, {PCIC_DASP, PCIS_DASP_PERFCNTRS, 1, "performance counters"}, {PCIC_DASP, PCIS_DASP_COMM_SYNC, 1, "communication synchronizer"}, {PCIC_DASP, PCIS_DASP_MGMT_CARD, 1, "signal processing management"}, {0, 0, 0, NULL} }; void pci_probe_nomatch(device_t dev, device_t child) { int i, report; const char *cp, *scp; char *device; /* * Look for a listing for this device in a loaded device database. */ report = 1; if ((device = pci_describe_device(child)) != NULL) { device_printf(dev, "<%s>", device); free(device, M_DEVBUF); } else { /* * Scan the class/subclass descriptions for a general * description. */ cp = "unknown"; scp = NULL; for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) { if (pci_nomatch_tab[i].class == pci_get_class(child)) { if (pci_nomatch_tab[i].subclass == -1) { cp = pci_nomatch_tab[i].desc; report = pci_nomatch_tab[i].report; } else if (pci_nomatch_tab[i].subclass == pci_get_subclass(child)) { scp = pci_nomatch_tab[i].desc; report = pci_nomatch_tab[i].report; } } } if (report || bootverbose) { device_printf(dev, "<%s%s%s>", cp ? cp : "", ((cp != NULL) && (scp != NULL)) ? ", " : "", scp ? 
scp : ""); } } if (report || bootverbose) { printf(" at device %d.%d (no driver attached)\n", pci_get_slot(child), pci_get_function(child)); } pci_cfg_save(child, device_get_ivars(child), 1); } void pci_child_detached(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list *rl; dinfo = device_get_ivars(child); rl = &dinfo->resources; /* * Have to deallocate IRQs before releasing any MSI messages and * have to release MSI messages before deallocating any memory * BARs. */ if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0) pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n"); if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) { if (dinfo->cfg.msi.msi_alloc != 0) pci_printf(&dinfo->cfg, "Device leaked %d MSI " "vectors\n", dinfo->cfg.msi.msi_alloc); else pci_printf(&dinfo->cfg, "Device leaked %d MSI-X " "vectors\n", dinfo->cfg.msix.msix_alloc); (void)pci_release_msi(child); } if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0) pci_printf(&dinfo->cfg, "Device leaked memory resources\n"); if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0) pci_printf(&dinfo->cfg, "Device leaked I/O resources\n"); #ifdef PCI_RES_BUS if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0) pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n"); #endif pci_cfg_save(child, dinfo, 1); } /* * Parse the PCI device database, if loaded, and return a pointer to a * description of the device. * * The database is flat text formatted as follows: * * Any line not in a valid format is ignored. * Lines are terminated with newline '\n' characters. * * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then * the vendor name. * * A DEVICE line is entered immediately below the corresponding VENDOR ID. * - devices cannot be listed without a corresponding VENDOR line. * A DEVICE line consists of a TAB, the 4 digit (hex) device code, * another TAB, then the device name. */ /* * Assuming (ptr) points to the beginning of a line in the database, * return the vendor or device and description of the next entry. * The value of (vendor) or (device) inappropriate for the entry type * is set to -1. Returns nonzero at the end of the database. * * Note that this is slightly unrobust in the face of corrupt data; * we attempt to safeguard against this by spamming the end of the * database with a newline when we initialise. */ static int pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc) { char *cp = *ptr; int left; *device = -1; *vendor = -1; **desc = '\0'; for (;;) { left = pci_vendordata_size - (cp - pci_vendordata); if (left <= 0) { *ptr = cp; return(1); } /* vendor entry? */ if (*cp != '\t' && sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2) break; /* device entry? */ if (*cp == '\t' && sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2) break; /* skip to next line */ while (*cp != '\n' && left > 0) { cp++; left--; } if (*cp == '\n') { cp++; left--; } } /* skip to next line */ while (*cp != '\n' && left > 0) { cp++; left--; } if (*cp == '\n' && left > 0) cp++; *ptr = cp; return(0); } static char * pci_describe_device(device_t dev) { int vendor, device; char *desc, *vp, *dp, *line; desc = vp = dp = NULL; /* * If we have no vendor data, we can't do anything. 
*/ if (pci_vendordata == NULL) goto out; /* * Scan the vendor data looking for this device */ line = pci_vendordata; if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) goto out; for (;;) { if (pci_describe_parse_line(&line, &vendor, &device, &vp)) goto out; if (vendor == pci_get_vendor(dev)) break; } if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) goto out; for (;;) { if (pci_describe_parse_line(&line, &vendor, &device, &dp)) { *dp = 0; break; } if (vendor != -1) { *dp = 0; break; } if (device == pci_get_device(dev)) break; } if (dp[0] == '\0') snprintf(dp, 80, "0x%x", pci_get_device(dev)); if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) != NULL) sprintf(desc, "%s, %s", vp, dp); out: if (vp != NULL) free(vp, M_DEVBUF); if (dp != NULL) free(dp, M_DEVBUF); return(desc); } int pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct pci_devinfo *dinfo; pcicfgregs *cfg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; switch (which) { case PCI_IVAR_ETHADDR: /* * The generic accessor doesn't deal with failure, so * we set the return value, then return an error. */ *((uint8_t **) result) = NULL; return (EINVAL); case PCI_IVAR_SUBVENDOR: *result = cfg->subvendor; break; case PCI_IVAR_SUBDEVICE: *result = cfg->subdevice; break; case PCI_IVAR_VENDOR: *result = cfg->vendor; break; case PCI_IVAR_DEVICE: *result = cfg->device; break; case PCI_IVAR_DEVID: *result = (cfg->device << 16) | cfg->vendor; break; case PCI_IVAR_CLASS: *result = cfg->baseclass; break; case PCI_IVAR_SUBCLASS: *result = cfg->subclass; break; case PCI_IVAR_PROGIF: *result = cfg->progif; break; case PCI_IVAR_REVID: *result = cfg->revid; break; case PCI_IVAR_INTPIN: *result = cfg->intpin; break; case PCI_IVAR_IRQ: *result = cfg->intline; break; case PCI_IVAR_DOMAIN: *result = cfg->domain; break; case PCI_IVAR_BUS: *result = cfg->bus; break; case PCI_IVAR_SLOT: *result = cfg->slot; break; case PCI_IVAR_FUNCTION: *result = cfg->func; break; case PCI_IVAR_CMDREG: *result = cfg->cmdreg; break; case PCI_IVAR_CACHELNSZ: *result = cfg->cachelnsz; break; case PCI_IVAR_MINGNT: if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { *result = -1; return (EINVAL); } *result = cfg->mingnt; break; case PCI_IVAR_MAXLAT: if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { *result = -1; return (EINVAL); } *result = cfg->maxlat; break; case PCI_IVAR_LATTIMER: *result = cfg->lattimer; break; default: return (ENOENT); } return (0); } int pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case PCI_IVAR_INTPIN: dinfo->cfg.intpin = value; return (0); case PCI_IVAR_ETHADDR: case PCI_IVAR_SUBVENDOR: case PCI_IVAR_SUBDEVICE: case PCI_IVAR_VENDOR: case PCI_IVAR_DEVICE: case PCI_IVAR_DEVID: case PCI_IVAR_CLASS: case PCI_IVAR_SUBCLASS: case PCI_IVAR_PROGIF: case PCI_IVAR_REVID: case PCI_IVAR_IRQ: case PCI_IVAR_DOMAIN: case PCI_IVAR_BUS: case PCI_IVAR_SLOT: case PCI_IVAR_FUNCTION: return (EINVAL); /* disallow for now */ default: return (ENOENT); } } #include "opt_ddb.h" #ifdef DDB #include #include /* * List resources based on pci map registers, used for within ddb */ DB_SHOW_COMMAND(pciregs, db_pci_dump) { struct pci_devinfo *dinfo; struct devlist *devlist_head; struct pci_conf *p; const char *name; int i, error, none_count; none_count = 0; /* get the head of the device queue */ devlist_head = &pci_devq; /* * Go through the list of devices and print out devices */ for (error = 0, i = 0, dinfo = STAILQ_FIRST(devlist_head); (dinfo 
!= NULL) && (error == 0) && (i < pci_numdevs) &&
	    !db_pager_quit; dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

		/* Populate pd_name and pd_unit */
		name = NULL;
		if (dinfo->cfg.dev)
			name = device_get_name(dinfo->cfg.dev);

		p = &dinfo->conf;
		db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
		    "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
		    (name && *name) ? name : "none",
		    (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
		    none_count++, p->pc_sel.pc_domain, p->pc_sel.pc_bus,
		    p->pc_sel.pc_dev, p->pc_sel.pc_func,
		    (p->pc_class << 16) | (p->pc_subclass << 8) | p->pc_progif,
		    (p->pc_subdevice << 16) | p->pc_subvendor,
		    (p->pc_device << 16) | p->pc_vendor, p->pc_revid,
		    p->pc_hdr);
	}
}
#endif /* DDB */

static struct resource *
pci_reserve_map(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int num,
    u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource *res;
	struct pci_map *pm;
	uint16_t cmd;
	pci_addr_t map, testval;
	int mapsize;

	res = NULL;

	/* If rid is managed by EA, ignore it */
	if (pci_ea_is_enabled(child, *rid))
		goto out;

	pm = pci_find_bar(child, *rid);
	if (pm != NULL) {
		/* This is a BAR that we failed to allocate earlier. */
		mapsize = pm->pm_size;
		map = pm->pm_value;
	} else {
		/*
		 * Weed out the bogons, and figure out how large the
		 * BAR/map is.  BARs that read back 0 here are bogus
		 * and unimplemented.  Note: atapci devices in legacy
		 * mode are special and handled elsewhere in the code.
		 * If you have an atapci device in legacy mode and it
		 * fails here, that other code is broken.
		 */
		pci_read_bar(child, *rid, &map, &testval, NULL);

		/*
		 * Determine the size of the BAR and ignore BARs with a size
		 * of 0.  Device ROM BARs use a different mask value.
		 */
		if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
			mapsize = pci_romsize(testval);
		else
			mapsize = pci_mapsize(testval);
		if (mapsize == 0)
			goto out;
		pm = pci_add_bar(child, *rid, map, mapsize);
	}

	if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
		if (type != SYS_RES_MEMORY) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is a memio\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	} else {
		if (type != SYS_RES_IOPORT) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an ioport\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	}

	/*
	 * For real BARs, we need to override the size that
	 * the driver requests, because that's what the BAR
	 * actually uses and we would otherwise have a
	 * situation where we might allocate the excess to
	 * another driver, which won't work.
	 */
	count = ((pci_addr_t)1 << mapsize) * num;
	if (RF_ALIGNMENT(flags) < mapsize)
		flags = (flags & ~RF_ALIGNMENT_MASK) |
		    RF_ALIGNMENT_LOG2(mapsize);
	if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
		flags |= RF_PREFETCHABLE;

	/*
	 * Allocate enough resource, and then write back the
	 * appropriate BAR for that resource.
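	 * Decoding is disabled around the BAR rewrite below because a
	 * half-updated BAR, in particular the two halves of a 64-bit
	 * one, could transiently claim a range belonging to another
	 * device.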
*/ resource_list_add(rl, type, *rid, start, end, count); res = resource_list_reserve(rl, dev, child, type, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { resource_list_delete(rl, type, *rid); device_printf(child, "%#jx bytes of rid %#x res %d failed (%#jx, %#jx).\n", count, *rid, type, start, end); goto out; } if (bootverbose) device_printf(child, "Lazy allocation of %#jx bytes rid %#x type %d at %#jx\n", count, *rid, type, rman_get_start(res)); /* Disable decoding via the CMD register before updating the BAR */ cmd = pci_read_config(child, PCIR_COMMAND, 2); pci_write_config(child, PCIR_COMMAND, cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); map = rman_get_start(res); pci_write_bar(child, pm, map); /* Restore the original value of the CMD register */ pci_write_config(child, PCIR_COMMAND, cmd, 2); out: return (res); } struct resource * pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_long num, u_int flags) { struct pci_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; struct resource *res; pcicfgregs *cfg; /* * Perform lazy resource allocation */ dinfo = device_get_ivars(child); rl = &dinfo->resources; cfg = &dinfo->cfg; switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_alloc_secbus(dev, child, rid, start, end, count, flags)); #endif case SYS_RES_IRQ: /* * Can't alloc legacy interrupt once MSI messages have * been allocated. */ if (*rid == 0 && (cfg->msi.msi_alloc > 0 || cfg->msix.msix_alloc > 0)) return (NULL); /* * If the child device doesn't have an interrupt * routed and is deserving of an interrupt, try to * assign it one. */ if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) && (cfg->intpin != 0)) pci_assign_interrupt(dev, child, 0); break; case SYS_RES_IOPORT: case SYS_RES_MEMORY: #ifdef NEW_PCIB /* * PCI-PCI bridge I/O window resources are not BARs. * For those allocations just pass the request up the * tree. */ if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) { switch (*rid) { case PCIR_IOBASEL_1: case PCIR_MEMBASE_1: case PCIR_PMBASEL_1: /* * XXX: Should we bother creating a resource * list entry? */ return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } } #endif /* Reserve resources for this BAR if needed. */ rle = resource_list_find(rl, type, *rid); if (rle == NULL) { res = pci_reserve_map(dev, child, type, rid, start, end, count, num, flags); if (res == NULL) return (NULL); } } return (resource_list_alloc(rl, dev, child, type, rid, start, end, count, flags)); } struct resource * pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifdef PCI_IOV struct pci_devinfo *dinfo; #endif if (device_get_parent(child) != dev) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); #ifdef PCI_IOV dinfo = device_get_ivars(child); if (dinfo->cfg.flags & PCICFG_VF) { switch (type) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: return (NULL); case SYS_RES_MEMORY: return (pci_vf_alloc_mem_resource(dev, child, rid, start, end, count, flags)); } /* Fall through for other types of resource allocations. 
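		 * Whichever path is taken, a leaf driver normally reaches
		 * this method via bus_alloc_resource_any(9); a minimal
		 * sketch (error handling omitted, BAR 0 assumed to be
		 * memory-mapped):
		 *
		 *	int rid = PCIR_BAR(0);
		 *	struct resource *res = bus_alloc_resource_any(dev,
		 *	    SYS_RES_MEMORY, &rid, RF_ACTIVE);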
*/ } #endif return (pci_alloc_multi_resource(dev, child, type, rid, start, end, count, 1, flags)); } int pci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pci_devinfo *dinfo; struct resource_list *rl; pcicfgregs *cfg; if (device_get_parent(child) != dev) return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r)); dinfo = device_get_ivars(child); cfg = &dinfo->cfg; #ifdef PCI_IOV if (dinfo->cfg.flags & PCICFG_VF) { switch (type) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: return (EDOOFUS); case SYS_RES_MEMORY: return (pci_vf_release_mem_resource(dev, child, rid, r)); } /* Fall through for other types of resource allocations. */ } #endif #ifdef NEW_PCIB /* * PCI-PCI bridge I/O window resources are not BARs. For * those allocations just pass the request up the tree. */ if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE && (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) { switch (rid) { case PCIR_IOBASEL_1: case PCIR_MEMBASE_1: case PCIR_PMBASEL_1: return (bus_generic_release_resource(dev, child, type, rid, r)); } } #endif rl = &dinfo->resources; return (resource_list_release(rl, dev, child, type, rid, r)); } int pci_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pci_devinfo *dinfo; int error; error = bus_generic_activate_resource(dev, child, type, rid, r); if (error) return (error); /* Enable decoding in the command register when activating BARs. */ if (device_get_parent(child) == dev) { /* Device ROMs need their decoding explicitly enabled. */ dinfo = device_get_ivars(child); if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) pci_write_bar(child, pci_find_bar(child, rid), rman_get_start(r) | PCIM_BIOS_ENABLE); switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: error = PCI_ENABLE_IO(dev, child, type); break; } } return (error); } int pci_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pci_devinfo *dinfo; int error; error = bus_generic_deactivate_resource(dev, child, type, rid, r); if (error) return (error); /* Disable decoding for device ROMs. */ if (device_get_parent(child) == dev) { dinfo = device_get_ivars(child); if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) pci_write_bar(child, pci_find_bar(child, rid), rman_get_start(r)); } return (0); } void pci_child_deleted(device_t dev, device_t child) { struct resource_list_entry *rle; struct resource_list *rl; struct pci_devinfo *dinfo; dinfo = device_get_ivars(child); rl = &dinfo->resources; EVENTHANDLER_INVOKE(pci_delete_device, child); /* Turn off access to resources we're about to free */ if (bus_child_present(child) != 0) { pci_write_config(child, PCIR_COMMAND, pci_read_config(child, PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2); pci_disable_busmaster(child); } /* Free all allocated resources */ STAILQ_FOREACH(rle, rl, link) { if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, rle->type, rle->rid)) { pci_printf(&dinfo->cfg, "Resource still owned, oops. 
" "(type=%d, rid=%d, addr=%lx)\n", rle->type, rle->rid, rman_get_start(rle->res)); bus_release_resource(child, rle->type, rle->rid, rle->res); } resource_list_unreserve(rl, dev, child, rle->type, rle->rid); } } resource_list_free(rl); pci_freecfg(dinfo); } void pci_delete_resource(device_t dev, device_t child, int type, int rid) { struct pci_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return; dinfo = device_get_ivars(child); rl = &dinfo->resources; rle = resource_list_find(rl, type, rid); if (rle == NULL) return; if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, type, rid)) { device_printf(dev, "delete_resource: " "Resource still owned by child, oops. " "(type=%d, rid=%d, addr=%jx)\n", type, rid, rman_get_start(rle->res)); return; } resource_list_unreserve(rl, dev, child, type, rid); } resource_list_delete(rl, type, rid); } struct resource_list * pci_get_resource_list (device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } #ifdef IOMMU bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev) { bus_dma_tag_t tag; struct pci_softc *sc; if (device_get_parent(dev) == bus) { /* try iommu and return if it works */ tag = iommu_get_dma_tag(bus, dev); } else tag = NULL; if (tag == NULL) { sc = device_get_softc(bus); tag = sc->sc_dma_tag; } return (tag); } #else bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev) { struct pci_softc *sc = device_get_softc(bus); return (sc->sc_dma_tag); } #endif uint32_t pci_read_config_method(device_t dev, device_t child, int reg, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; #ifdef PCI_IOV /* * SR-IOV VFs don't implement the VID or DID registers, so we have to * emulate them here. */ if (cfg->flags & PCICFG_VF) { if (reg == PCIR_VENDOR) { switch (width) { case 4: return (cfg->device << 16 | cfg->vendor); case 2: return (cfg->vendor); case 1: return (cfg->vendor & 0xff); default: return (0xffffffff); } } else if (reg == PCIR_DEVICE) { switch (width) { /* Note that an unaligned 4-byte read is an error. 
*/ case 2: return (cfg->device); case 1: return (cfg->device & 0xff); default: return (0xffffffff); } } } #endif return (PCIB_READ_CONFIG(device_get_parent(dev), cfg->bus, cfg->slot, cfg->func, reg, width)); } void pci_write_config_method(device_t dev, device_t child, int reg, uint32_t val, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; PCIB_WRITE_CONFIG(device_get_parent(dev), cfg->bus, cfg->slot, cfg->func, reg, val, width); } int -pci_child_location_str_method(device_t dev, device_t child, char *buf, - size_t buflen) +pci_child_location_method(device_t dev, device_t child, struct sbuf *sb) { - snprintf(buf, buflen, "slot=%d function=%d dbsf=pci%d:%d:%d:%d", + sbuf_printf(sb, "slot=%d function=%d dbsf=pci%d:%d:%d:%d", pci_get_slot(child), pci_get_function(child), pci_get_domain(child), pci_get_bus(child), pci_get_slot(child), pci_get_function(child)); return (0); } int -pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf, - size_t buflen) +pci_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb) { struct pci_devinfo *dinfo; pcicfgregs *cfg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; - snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x " + sbuf_printf(sb, "vendor=0x%04x device=0x%04x subvendor=0x%04x " "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device, cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass, cfg->progif); return (0); } int pci_assign_interrupt_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child, cfg->intpin)); } static void pci_lookup(void *arg, const char *name, device_t *dev) { long val; char *end; int domain, bus, slot, func; if (*dev != NULL) return; /* * Accept pciconf-style selectors of either pciD:B:S:F or * pciB:S:F. In the latter case, the domain is assumed to * be zero. 
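 * For example, "pci1:2:3:4" selects domain 1, bus 2, slot 3,
 * function 4, while "pci2:3:4" selects the same bus, slot, and
 * function in domain 0.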
*/ if (strncmp(name, "pci", 3) != 0) return; val = strtol(name + 3, &end, 10); if (val < 0 || val > INT_MAX || *end != ':') return; domain = val; val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX || *end != ':') return; bus = val; val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX) return; slot = val; if (*end == ':') { val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX || *end != '\0') return; func = val; } else if (*end == '\0') { func = slot; slot = bus; bus = domain; domain = 0; } else return; if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX || func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX)) return; *dev = pci_find_dbsf(domain, bus, slot, func); } static int pci_modevent(module_t mod, int what, void *arg) { static struct cdev *pci_cdev; static eventhandler_tag tag; switch (what) { case MOD_LOAD: STAILQ_INIT(&pci_devq); pci_generation = 0; pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644, "pci"); pci_load_vendor_data(); tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL, 1000); break; case MOD_UNLOAD: if (tag != NULL) EVENTHANDLER_DEREGISTER(dev_lookup, tag); destroy_dev(pci_cdev); break; } return (0); } static void pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo) { #define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2) struct pcicfg_pcie *cfg; int version, pos; cfg = &dinfo->cfg.pcie; pos = cfg->pcie_location; version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ENDPOINT || cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl); if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ROOT_EC) WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl); if (version > 1) { WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2); WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2); WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2); } #undef WREG } static void pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo) { pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, dinfo->cfg.pcix.pcix_command, 2); } void pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo) { /* * Restore the device to full power mode. We must do this * before we restore the registers because moving from D3 to * D0 will cause the chip's BARs and some other registers to * be reset to some unknown power on reset values. Cut down * the noise on boot by doing nothing if we are already in * state D0. 
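 * For example, a BAR programmed before a suspend reads back as
 * its power-on default after the D3-to-D0 transition, which is
 * why pci_restore_bars() is called below to rewrite it.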
*/ if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) pci_set_powerstate(dev, PCI_POWERSTATE_D0); pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1); pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1); pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1); pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1); pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1); pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1); switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1); pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1); break; case PCIM_HDRTYPE_BRIDGE: pci_write_config(dev, PCIR_SECLAT_1, dinfo->cfg.bridge.br_seclat, 1); pci_write_config(dev, PCIR_SUBBUS_1, dinfo->cfg.bridge.br_subbus, 1); pci_write_config(dev, PCIR_SECBUS_1, dinfo->cfg.bridge.br_secbus, 1); pci_write_config(dev, PCIR_PRIBUS_1, dinfo->cfg.bridge.br_pribus, 1); pci_write_config(dev, PCIR_BRIDGECTL_1, dinfo->cfg.bridge.br_control, 2); break; case PCIM_HDRTYPE_CARDBUS: pci_write_config(dev, PCIR_SECLAT_2, dinfo->cfg.bridge.br_seclat, 1); pci_write_config(dev, PCIR_SUBBUS_2, dinfo->cfg.bridge.br_subbus, 1); pci_write_config(dev, PCIR_SECBUS_2, dinfo->cfg.bridge.br_secbus, 1); pci_write_config(dev, PCIR_PRIBUS_2, dinfo->cfg.bridge.br_pribus, 1); pci_write_config(dev, PCIR_BRIDGECTL_2, dinfo->cfg.bridge.br_control, 2); break; } pci_restore_bars(dev); if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_BRIDGE) pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2); /* * Restore extended capabilities for PCI-Express and PCI-X */ if (dinfo->cfg.pcie.pcie_location != 0) pci_cfg_restore_pcie(dev, dinfo); if (dinfo->cfg.pcix.pcix_location != 0) pci_cfg_restore_pcix(dev, dinfo); /* Restore MSI and MSI-X configurations if they are present. */ if (dinfo->cfg.msi.msi_location != 0) pci_resume_msi(dev); if (dinfo->cfg.msix.msix_location != 0) pci_resume_msix(dev); #ifdef PCI_IOV if (dinfo->cfg.iov != NULL) pci_iov_cfg_restore(dev, dinfo); #endif } static void pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo) { #define RREG(n) pci_read_config(dev, pos + (n), 2) struct pcicfg_pcie *cfg; int version, pos; cfg = &dinfo->cfg.pcie; pos = cfg->pcie_location; cfg->pcie_flags = RREG(PCIER_FLAGS); version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ENDPOINT || cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL); if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ROOT_EC) cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL); if (version > 1) { cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2); cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2); cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2); } #undef RREG } static void pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo) { dinfo->cfg.pcix.pcix_command = pci_read_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2); } void pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate) { uint32_t cls; int ps; /* * Some drivers apparently write to these registers w/o updating our * cached copy. No harm happens if we update the copy, so do so here * so we can restore them. 
The COMMAND register is modified by the * bus w/o updating the cache. This should represent the normally * writable portion of the 'defined' part of type 0/1/2 headers. */ dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2); dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2); dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2); dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1); dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1); dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1); dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1); dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1); dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1); switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2); dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2); dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1); dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1); break; case PCIM_HDRTYPE_BRIDGE: dinfo->cfg.bridge.br_seclat = pci_read_config(dev, PCIR_SECLAT_1, 1); dinfo->cfg.bridge.br_subbus = pci_read_config(dev, PCIR_SUBBUS_1, 1); dinfo->cfg.bridge.br_secbus = pci_read_config(dev, PCIR_SECBUS_1, 1); dinfo->cfg.bridge.br_pribus = pci_read_config(dev, PCIR_PRIBUS_1, 1); dinfo->cfg.bridge.br_control = pci_read_config(dev, PCIR_BRIDGECTL_1, 2); break; case PCIM_HDRTYPE_CARDBUS: dinfo->cfg.bridge.br_seclat = pci_read_config(dev, PCIR_SECLAT_2, 1); dinfo->cfg.bridge.br_subbus = pci_read_config(dev, PCIR_SUBBUS_2, 1); dinfo->cfg.bridge.br_secbus = pci_read_config(dev, PCIR_SECBUS_2, 1); dinfo->cfg.bridge.br_pribus = pci_read_config(dev, PCIR_PRIBUS_2, 1); dinfo->cfg.bridge.br_control = pci_read_config(dev, PCIR_BRIDGECTL_2, 2); dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2); dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2); break; } if (dinfo->cfg.pcie.pcie_location != 0) pci_cfg_save_pcie(dev, dinfo); if (dinfo->cfg.pcix.pcix_location != 0) pci_cfg_save_pcix(dev, dinfo); #ifdef PCI_IOV if (dinfo->cfg.iov != NULL) pci_iov_cfg_save(dev, dinfo); #endif /* * don't set the state for display devices, base peripherals and * memory devices since bad things happen when they are powered down. * We should (a) have drivers that can easily detach and (b) use * generic drivers for these devices so that some device actually * attaches. We need to make sure that when we implement (a) we don't * power the device down on a reattach. */ cls = pci_get_class(dev); if (!setstate) return; switch (pci_do_power_nodriver) { case 0: /* NO powerdown at all */ return; case 1: /* Conservative about what to power down */ if (cls == PCIC_STORAGE) return; /*FALLTHROUGH*/ case 2: /* Aggressive about what to power down */ if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY || cls == PCIC_BASEPERIPH) return; /*FALLTHROUGH*/ case 3: /* Power down everything */ break; } /* * PCI spec says we can only go into D3 state from D0 state. * Transition from D[12] into D0 before going to D3 state. */ ps = pci_get_powerstate(dev); if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D0); if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D3); } /* Wrapper APIs suitable for device driver use. 
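 *
 * For example, a driver resetting its own device could bracket
 * the reset with these wrappers, per the save/restore contract
 * noted in the comment above pcie_flr() later in this file
 * (a sketch, not code from this file):
 *
 *	pci_save_state(dev);
 *	if (!pcie_flr(dev, 1000, false))
 *		pci_power_reset(dev);
 *	pci_restore_state(dev);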
*/ void pci_save_state(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); pci_cfg_save(dev, dinfo, 0); } void pci_restore_state(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); pci_cfg_restore(dev, dinfo); } static int pci_get_id_method(device_t dev, device_t child, enum pci_id_type type, uintptr_t *id) { return (PCIB_GET_ID(device_get_parent(dev), child, type, id)); } /* Find the upstream port of a given PCI device in a root complex. */ device_t pci_find_pcie_root_port(device_t dev) { struct pci_devinfo *dinfo; devclass_t pci_class; device_t pcib, bus; pci_class = devclass_find("pci"); KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class, ("%s: non-pci device %s", __func__, device_get_nameunit(dev))); /* * Walk the bridge hierarchy until we find a PCI-e root * port or a non-PCI device. */ for (;;) { bus = device_get_parent(dev); KASSERT(bus != NULL, ("%s: null parent of %s", __func__, device_get_nameunit(dev))); pcib = device_get_parent(bus); KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__, device_get_nameunit(bus))); /* * pcib's parent must be a PCI bus for this to be a * PCI-PCI bridge. */ if (device_get_devclass(device_get_parent(pcib)) != pci_class) return (NULL); dinfo = device_get_ivars(pcib); if (dinfo->cfg.pcie.pcie_location != 0 && dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) return (pcib); dev = pcib; } } /* * Wait for pending transactions to complete on a PCI-express function. * * The maximum delay is specified in milliseconds in max_delay. Note * that this function may sleep. * * Returns true if the function is idle and false if the timeout is * exceeded. If dev is not a PCI-express function, this returns true. */ bool pcie_wait_for_pending_transactions(device_t dev, u_int max_delay) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint16_t sta; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (true); sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2); while (sta & PCIEM_STA_TRANSACTION_PND) { if (max_delay == 0) return (false); /* Poll once every 100 milliseconds up to the timeout. */ if (max_delay > 100) { pause_sbt("pcietp", 100 * SBT_1MS, 0, C_HARDCLOCK); max_delay -= 100; } else { pause_sbt("pcietp", max_delay * SBT_1MS, 0, C_HARDCLOCK); max_delay = 0; } sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2); } return (true); } /* * Determine the maximum Completion Timeout in microseconds. * * For non-PCI-express functions this returns 0. */ int pcie_get_max_completion_timeout(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); /* * Functions using the 1.x spec use the default timeout range of * 50 microseconds to 50 milliseconds. Functions that do not * support programmable timeouts also use this range. 
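 * For example, a version 2+ function programmed with
 * PCIEM_CTL2_COMP_TIMO_10MS reports 10000 microseconds here;
 * callers such as pcie_flr() divide by 1000 to obtain
 * milliseconds.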
*/ if ((dinfo->cfg.pcie.pcie_flags & PCIEM_FLAGS_VERSION) < 2 || (pci_read_config(dev, cap + PCIER_DEVICE_CAP2, 4) & PCIEM_CAP2_COMP_TIMO_RANGES) == 0) return (50 * 1000); switch (pci_read_config(dev, cap + PCIER_DEVICE_CTL2, 2) & PCIEM_CTL2_COMP_TIMO_VAL) { case PCIEM_CTL2_COMP_TIMO_100US: return (100); case PCIEM_CTL2_COMP_TIMO_10MS: return (10 * 1000); case PCIEM_CTL2_COMP_TIMO_55MS: return (55 * 1000); case PCIEM_CTL2_COMP_TIMO_210MS: return (210 * 1000); case PCIEM_CTL2_COMP_TIMO_900MS: return (900 * 1000); case PCIEM_CTL2_COMP_TIMO_3500MS: return (3500 * 1000); case PCIEM_CTL2_COMP_TIMO_13S: return (13 * 1000 * 1000); case PCIEM_CTL2_COMP_TIMO_64S: return (64 * 1000 * 1000); default: return (50 * 1000); } } void pcie_apei_error(device_t dev, int sev, uint8_t *aerp) { struct pci_devinfo *dinfo = device_get_ivars(dev); const char *s; int aer; uint32_t r, r1; uint16_t rs; if (sev == PCIEM_STA_CORRECTABLE_ERROR) s = "Correctable"; else if (sev == PCIEM_STA_NON_FATAL_ERROR) s = "Uncorrectable (Non-Fatal)"; else s = "Uncorrectable (Fatal)"; device_printf(dev, "%s PCIe error reported by APEI\n", s); if (aerp) { if (sev == PCIEM_STA_CORRECTABLE_ERROR) { r = le32dec(aerp + PCIR_AER_COR_STATUS); r1 = le32dec(aerp + PCIR_AER_COR_MASK); } else { r = le32dec(aerp + PCIR_AER_UC_STATUS); r1 = le32dec(aerp + PCIR_AER_UC_MASK); } device_printf(dev, "status 0x%08x mask 0x%08x", r, r1); if (sev != PCIEM_STA_CORRECTABLE_ERROR) { r = le32dec(aerp + PCIR_AER_UC_SEVERITY); rs = le16dec(aerp + PCIR_AER_CAP_CONTROL); printf(" severity 0x%08x first %d\n", r, rs & 0x1f); } else printf("\n"); } /* As a kind of recovery, just report and clear the error statuses. */ if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) { r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); if (r != 0) { pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4); device_printf(dev, "Clearing UC AER errors 0x%08x\n", r); } r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); if (r != 0) { pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4); device_printf(dev, "Clearing COR AER errors 0x%08x\n", r); } } if (dinfo->cfg.pcie.pcie_location != 0) { rs = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, 2); if ((rs & (PCIEM_STA_CORRECTABLE_ERROR | PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR | PCIEM_STA_UNSUPPORTED_REQ)) != 0) { pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, rs, 2); device_printf(dev, "Clearing PCIe errors 0x%04x\n", rs); } } } /* * Perform a Function Level Reset (FLR) on a device. * * This function first waits for any pending transactions to complete * within the timeout specified by max_delay. If transactions are * still pending, the function will return false without attempting a * reset. * * If dev is not a PCI-express function or does not support FLR, this * function returns false. * * Note that no registers are saved or restored. The caller is * responsible for saving and restoring any registers including * PCI-standard registers via pci_save_state() and * pci_restore_state(). */ bool pcie_flr(device_t dev, u_int max_delay, bool force) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint16_t cmd, ctl; int compl_delay; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (false); if (!(pci_read_config(dev, cap + PCIER_DEVICE_CAP, 4) & PCIEM_CAP_FLR)) return (false); /* * Disable busmastering to prevent generation of new * transactions while waiting for the device to go idle. 
If * the idle timeout fails, the command register is restored * which will re-enable busmastering. */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCIM_CMD_BUSMASTEREN), 2); if (!pcie_wait_for_pending_transactions(dev, max_delay)) { if (!force) { pci_write_config(dev, PCIR_COMMAND, cmd, 2); return (false); } pci_printf(&dinfo->cfg, "Resetting with transactions pending after %d ms\n", max_delay); /* * Extend the post-FLR delay to cover the maximum * Completion Timeout delay of anything in flight * during the FLR delay. Enforce a minimum delay of * at least 10ms. */ compl_delay = pcie_get_max_completion_timeout(dev) / 1000; if (compl_delay < 10) compl_delay = 10; } else compl_delay = 0; /* Initiate the reset. */ ctl = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); pci_write_config(dev, cap + PCIER_DEVICE_CTL, ctl | PCIEM_CTL_INITIATE_FLR, 2); /* Wait for 100ms. */ pause_sbt("pcieflr", (100 + compl_delay) * SBT_1MS, 0, C_HARDCLOCK); if (pci_read_config(dev, cap + PCIER_DEVICE_STA, 2) & PCIEM_STA_TRANSACTION_PND) pci_printf(&dinfo->cfg, "Transactions pending after FLR!\n"); return (true); } /* * Attempt a power-management reset by cycling the device in/out of D3 * state. The PCI spec says we can only go into D3 state from D0 state. * Transition from D[12] into D0 before going to D3 state. */ int pci_power_reset(device_t dev) { int ps; ps = pci_get_powerstate(dev); if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D0); pci_set_powerstate(dev, PCI_POWERSTATE_D3); pci_set_powerstate(dev, ps); return (0); } /* * For PCIe, try a link drop and retrain on the downstream port of the * upstream switch. According to the PCIe 3.0 spec 6.6.1, this must * cause a Conventional Hot Reset of the device in the slot. * An alternative, for PCIe, could be the secondary bus reset initiated * on the upstream switch via PCIR_BRIDGECTL_1, bit 6. */ int pcie_link_reset(device_t port, int pcie_location) { uint16_t v; v = pci_read_config(port, pcie_location + PCIER_LINK_CTL, 2); v |= PCIEM_LINK_CTL_LINK_DIS; pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2); pause_sbt("pcier1", mstosbt(20), 0, 0); v &= ~PCIEM_LINK_CTL_LINK_DIS; v |= PCIEM_LINK_CTL_RETRAIN_LINK; pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2); pause_sbt("pcier2", mstosbt(100), 0, 0); /* 100 ms */ v = pci_read_config(port, pcie_location + PCIER_LINK_STA, 2); return ((v & PCIEM_LINK_STA_TRAINING) != 0 ? ETIMEDOUT : 0); } static int pci_reset_post(device_t dev, device_t child) { if (dev == device_get_parent(child)) pci_restore_state(child); return (0); } static int pci_reset_prepare(device_t dev, device_t child) { if (dev == device_get_parent(child)) pci_save_state(child); return (0); } static int pci_reset_child(device_t dev, device_t child, int flags) { int error; if (dev == NULL || device_get_parent(child) != dev) return (0); if ((flags & DEVF_RESET_DETACH) != 0) { error = device_get_state(child) == DS_ATTACHED ? 
device_detach(child) : 0; } else { error = BUS_SUSPEND_CHILD(dev, child); } if (error == 0) { if (!pcie_flr(child, 1000, false)) { error = BUS_RESET_PREPARE(dev, child); if (error == 0) pci_power_reset(child); BUS_RESET_POST(dev, child); } if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } return (error); } const struct pci_device_table * pci_match_device(device_t child, const struct pci_device_table *id, size_t nelt) { bool match; uint16_t vendor, device, subvendor, subdevice, class, subclass, revid; vendor = pci_get_vendor(child); device = pci_get_device(child); subvendor = pci_get_subvendor(child); subdevice = pci_get_subdevice(child); class = pci_get_class(child); subclass = pci_get_subclass(child); revid = pci_get_revid(child); while (nelt-- > 0) { match = true; if (id->match_flag_vendor) match &= vendor == id->vendor; if (id->match_flag_device) match &= device == id->device; if (id->match_flag_subvendor) match &= subvendor == id->subvendor; if (id->match_flag_subdevice) match &= subdevice == id->subdevice; if (id->match_flag_class) match &= class == id->class_id; if (id->match_flag_subclass) match &= subclass == id->subclass; if (id->match_flag_revid) match &= revid == id->revid; if (match) return (id); id++; } return (NULL); } static void pci_print_faulted_dev_name(const struct pci_devinfo *dinfo) { const char *dev_name; device_t dev; dev = dinfo->cfg.dev; printf("pci%d:%d:%d:%d", dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func); dev_name = device_get_name(dev); if (dev_name != NULL) printf(" (%s%d)", dev_name, device_get_unit(dev)); } void pci_print_faulted_dev(void) { struct pci_devinfo *dinfo; device_t dev; int aer, i; uint32_t r1, r2; uint16_t status; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { dev = dinfo->cfg.dev; status = pci_read_config(dev, PCIR_STATUS, 2); status &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT | PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT | PCIM_STATUS_SERR | PCIM_STATUS_PERR; if (status != 0) { pci_print_faulted_dev_name(dinfo); printf(" error 0x%04x\n", status); } if (dinfo->cfg.pcie.pcie_location != 0) { status = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, 2); if ((status & (PCIEM_STA_CORRECTABLE_ERROR | PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR | PCIEM_STA_UNSUPPORTED_REQ)) != 0) { pci_print_faulted_dev_name(dinfo); printf(" PCIe DEVCTL 0x%04x DEVSTA 0x%04x\n", pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, 2), status); } } if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) { r1 = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); r2 = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); if (r1 != 0 || r2 != 0) { pci_print_faulted_dev_name(dinfo); printf(" AER UC 0x%08x Mask 0x%08x Svr 0x%08x\n" " COR 0x%08x Mask 0x%08x Ctl 0x%08x\n", r1, pci_read_config(dev, aer + PCIR_AER_UC_MASK, 4), pci_read_config(dev, aer + PCIR_AER_UC_SEVERITY, 4), r2, pci_read_config(dev, aer + PCIR_AER_COR_MASK, 4), pci_read_config(dev, aer + PCIR_AER_CAP_CONTROL, 4)); for (i = 0; i < 4; i++) { r1 = pci_read_config(dev, aer + PCIR_AER_HEADER_LOG + i * 4, 4); printf(" HL%d: 0x%08x\n", i, r1); } } } } } #ifdef DDB DB_SHOW_COMMAND(pcierr, pci_print_faulted_dev_db) { pci_print_faulted_dev(); } static void db_clear_pcie_errors(const struct pci_devinfo *dinfo) { device_t dev; int aer; uint32_t r; dev = dinfo->cfg.dev; r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, 2); pci_write_config(dev, dinfo->cfg.pcie.pcie_location + 
PCIER_DEVICE_STA, r, 2); if (pci_find_extcap(dev, PCIZ_AER, &aer) != 0) return; r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); if (r != 0) pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4); r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); if (r != 0) pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4); } DB_COMMAND(pci_clearerr, db_pci_clearerr) { struct pci_devinfo *dinfo; device_t dev; uint16_t status, status1; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { dev = dinfo->cfg.dev; status1 = status = pci_read_config(dev, PCIR_STATUS, 2); status1 &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT | PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT | PCIM_STATUS_SERR | PCIM_STATUS_PERR; if (status1 != 0) { status &= ~status1; pci_write_config(dev, PCIR_STATUS, status, 2); } if (dinfo->cfg.pcie.pcie_location != 0) db_clear_pcie_errors(dinfo); } } #endif diff --git a/sys/dev/pci/pci_private.h b/sys/dev/pci/pci_private.h index d891f592bdbd..095f22db69a9 100644 --- a/sys/dev/pci/pci_private.h +++ b/sys/dev/pci/pci_private.h @@ -1,183 +1,183 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef _PCI_PRIVATE_H_ #define _PCI_PRIVATE_H_ /* * Export definitions of the pci bus so that we can more easily share * it with "subclass" buses. 
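 *
 * For example, a subclass bus can inherit the pci methods via
 * DEFINE_CLASS_1 (the "foo" names below are illustrative):
 *
 *	DEFINE_CLASS_1(pci, foo_pci_driver, foo_pci_methods,
 *	    sizeof(struct pci_softc), pci_driver);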
*/ DECLARE_CLASS(pci_driver); struct pci_softc { bus_dma_tag_t sc_dma_tag; #ifdef PCI_RES_BUS struct resource *sc_bus; #endif }; extern int pci_do_power_resume; extern int pci_do_power_suspend; void pci_add_children(device_t dev, int domain, int busno); void pci_add_child(device_t bus, struct pci_devinfo *dinfo); device_t pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did); void pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask); void pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov); struct pci_devinfo *pci_alloc_devinfo_method(device_t dev); int pci_attach(device_t dev); int pci_attach_common(device_t dev); int pci_detach(device_t dev); int pci_rescan_method(device_t dev); void pci_driver_added(device_t dev, driver_t *driver); int pci_ea_is_enabled(device_t dev, int rid); int pci_print_child(device_t dev, device_t child); void pci_probe_nomatch(device_t dev, device_t child); int pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); int pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value); int pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep); int pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie); int pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr); int pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw, const char **vptr); int pci_set_powerstate_method(device_t dev, device_t child, int state); int pci_get_powerstate_method(device_t dev, device_t child); uint32_t pci_read_config_method(device_t dev, device_t child, int reg, int width); void pci_write_config_method(device_t dev, device_t child, int reg, uint32_t val, int width); int pci_enable_busmaster_method(device_t dev, device_t child); int pci_disable_busmaster_method(device_t dev, device_t child); int pci_enable_io_method(device_t dev, device_t child, int space); int pci_disable_io_method(device_t dev, device_t child, int space); int pci_find_cap_method(device_t dev, device_t child, int capability, int *capreg); int pci_find_next_cap_method(device_t dev, device_t child, int capability, int start, int *capreg); int pci_find_extcap_method(device_t dev, device_t child, int capability, int *capreg); int pci_find_next_extcap_method(device_t dev, device_t child, int capability, int start, int *capreg); int pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg); int pci_find_next_htcap_method(device_t dev, device_t child, int capability, int start, int *capreg); int pci_alloc_msi_method(device_t dev, device_t child, int *count); int pci_alloc_msix_method(device_t dev, device_t child, int *count); void pci_enable_msi_method(device_t dev, device_t child, uint64_t address, uint16_t data); void pci_enable_msix_method(device_t dev, device_t child, u_int index, uint64_t address, uint32_t data); void pci_disable_msi_method(device_t dev, device_t child); int pci_remap_msix_method(device_t dev, device_t child, int count, const u_int *vectors); int pci_release_msi_method(device_t dev, device_t child); int pci_msi_count_method(device_t dev, device_t child); int pci_msix_count_method(device_t dev, device_t child); int pci_msix_pba_bar_method(device_t dev, device_t child); int pci_msix_table_bar_method(device_t dev, device_t child); struct resource *pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t 
start, rman_res_t end, rman_res_t count, u_int flags); int pci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r); int pci_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r); int pci_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r); void pci_delete_resource(device_t dev, device_t child, int type, int rid); struct resource_list *pci_get_resource_list (device_t dev, device_t child); struct pci_devinfo *pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f); void pci_print_verbose(struct pci_devinfo *dinfo); int pci_freecfg(struct pci_devinfo *dinfo); void pci_child_deleted(device_t dev, device_t child); void pci_child_detached(device_t dev, device_t child); -int pci_child_location_str_method(device_t cbdev, device_t child, - char *buf, size_t buflen); -int pci_child_pnpinfo_str_method(device_t cbdev, device_t child, - char *buf, size_t buflen); +int pci_child_location_method(device_t cbdev, device_t child, + struct sbuf *sb); +int pci_child_pnpinfo_method(device_t cbdev, device_t child, + struct sbuf *sb); int pci_assign_interrupt_method(device_t dev, device_t child); int pci_resume(device_t dev); int pci_resume_child(device_t dev, device_t child); int pci_suspend_child(device_t dev, device_t child); bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev); void pci_child_added_method(device_t dev, device_t child); /** Restore the config register state. The state must be previously * saved with pci_cfg_save. However, the pci bus driver takes care of * that. This function will also return the device to PCI_POWERSTATE_D0 * if it is currently in a lower power mode. */ void pci_cfg_restore(device_t, struct pci_devinfo *); /** Save the config register state. Optionally set the power state to D3 * if the third argument is non-zero. */ void pci_cfg_save(device_t, struct pci_devinfo *, int); int pci_mapsize(uint64_t testval); void pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp, int *bar64); struct pci_map *pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size); struct resource *pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_long num, u_int flags); int pci_iov_attach_method(device_t bus, device_t dev, struct nvlist *pf_schema, struct nvlist *vf_schema, const char *name); int pci_iov_detach_method(device_t bus, device_t dev); device_t pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did); struct resource *pci_vf_alloc_mem_resource(device_t dev, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int pci_vf_release_mem_resource(device_t dev, device_t child, int rid, struct resource *r); #endif /* _PCI_PRIVATE_H_ */ diff --git a/sys/dev/puc/puc.c b/sys/dev/puc/puc.c index dd6e9b688705..e8130a5f2e9e 100644 --- a/sys/dev/puc/puc.c +++ b/sys/dev/puc/puc.c @@ -1,770 +1,769 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include +#include <sys/sbuf.h> #include #include #include #include #include #include #include #include #include #include #define PUC_ISRCCNT 5 struct puc_port { struct puc_bar *p_bar; struct resource *p_rres; struct resource *p_ires; device_t p_dev; int p_nr; int p_type; int p_rclk; int p_hasintr:1; serdev_intr_t *p_ihsrc[PUC_ISRCCNT]; void *p_iharg; int p_ipend; }; devclass_t puc_devclass; const char puc_driver_name[] = "puc"; static MALLOC_DEFINE(M_PUC, "PUC", "PUC driver"); SYSCTL_NODE(_hw, OID_AUTO, puc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "puc(9) driver configuration"); struct puc_bar * puc_get_bar(struct puc_softc *sc, int rid) { struct puc_bar *bar; struct rman *rm; rman_res_t end, start; int error, i; /* Find the BAR entry with the given RID. */ i = 0; while (i < PUC_PCI_BARS && sc->sc_bar[i].b_rid != rid) i++; if (i < PUC_PCI_BARS) return (&sc->sc_bar[i]); /* Not found. If we're looking for an unused entry, return NULL. */ if (rid == -1) return (NULL); /* Get an unused entry for us to fill. */ bar = puc_get_bar(sc, -1); if (bar == NULL) return (NULL); bar->b_rid = rid; bar->b_type = SYS_RES_IOPORT; bar->b_res = bus_alloc_resource_any(sc->sc_dev, bar->b_type, &bar->b_rid, RF_ACTIVE); if (bar->b_res == NULL) { bar->b_rid = rid; bar->b_type = SYS_RES_MEMORY; bar->b_res = bus_alloc_resource_any(sc->sc_dev, bar->b_type, &bar->b_rid, RF_ACTIVE); if (bar->b_res == NULL) { bar->b_rid = -1; return (NULL); } } /* Update our managed space. */ rm = (bar->b_type == SYS_RES_IOPORT) ? &sc->sc_ioport : &sc->sc_iomem; start = rman_get_start(bar->b_res); end = rman_get_end(bar->b_res); error = rman_manage_region(rm, start, end); if (error) { bus_release_resource(sc->sc_dev, bar->b_type, bar->b_rid, bar->b_res); bar->b_res = NULL; bar->b_rid = -1; bar = NULL; } return (bar); } static int puc_intr(void *arg) { struct puc_port *port; struct puc_softc *sc = arg; u_long ds, dev, devs; int i, idx, ipend, isrc, nints; uint8_t ilr; nints = 0; while (1) { /* * Obtain the set of devices with pending interrupts. */ devs = sc->sc_serdevs; if (sc->sc_ilr == PUC_ILR_DIGI) { idx = 0; while (devs & (0xfful << idx)) { ilr = ~bus_read_1(sc->sc_port[idx].p_rres, 7); devs &= ~0ul ^ ((u_long)ilr << idx); idx += 8; } } else if (sc->sc_ilr == PUC_ILR_QUATECH) { /* * Don't trust the value if it's the same as the option * register. It may mean that the ILR is not active and * we're reading the option register instead. This may * lead to false positives on 8-port boards. 
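 * As a worked example: if the option register holds 0xf0 and
 * the ILR readback also yields 0xf0, the two cannot be told
 * apart, so the mask of pending devices is left unchanged
 * rather than narrowed.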
*/ ilr = bus_read_1(sc->sc_port[0].p_rres, 7); if (ilr != (sc->sc_cfg_data & 0xff)) devs &= (u_long)ilr; } if (devs == 0UL) break; /* * Obtain the set of interrupt sources from those devices * that have pending interrupts. */ ipend = 0; idx = 0, dev = 1UL; ds = devs; while (ds != 0UL) { while ((ds & dev) == 0UL) idx++, dev <<= 1; ds &= ~dev; port = &sc->sc_port[idx]; port->p_ipend = SERDEV_IPEND(port->p_dev); ipend |= port->p_ipend; } if (ipend == 0) break; i = 0, isrc = SER_INT_OVERRUN; while (ipend) { while (i < PUC_ISRCCNT && !(ipend & isrc)) i++, isrc <<= 1; KASSERT(i < PUC_ISRCCNT, ("%s", __func__)); ipend &= ~isrc; idx = 0, dev = 1UL; ds = devs; while (ds != 0UL) { while ((ds & dev) == 0UL) idx++, dev <<= 1; ds &= ~dev; port = &sc->sc_port[idx]; if (!(port->p_ipend & isrc)) continue; if (port->p_ihsrc[i] != NULL) (*port->p_ihsrc[i])(port->p_iharg); nints++; } } } return ((nints > 0) ? FILTER_HANDLED : FILTER_STRAY); } int puc_bfe_attach(device_t dev) { char buffer[64]; struct puc_bar *bar; struct puc_port *port; struct puc_softc *sc; struct rman *rm; intptr_t res; bus_addr_t ofs, start; bus_size_t size; bus_space_handle_t bsh; bus_space_tag_t bst; int error, idx; sc = device_get_softc(dev); for (idx = 0; idx < PUC_PCI_BARS; idx++) sc->sc_bar[idx].b_rid = -1; do { sc->sc_ioport.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_ioport); if (!error) { sc->sc_iomem.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_iomem); if (!error) { sc->sc_irq.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_irq); if (!error) break; rman_fini(&sc->sc_iomem); } rman_fini(&sc->sc_ioport); } return (error); } while (0); snprintf(buffer, sizeof(buffer), "%s I/O port mapping", device_get_nameunit(dev)); sc->sc_ioport.rm_descr = strdup(buffer, M_PUC); snprintf(buffer, sizeof(buffer), "%s I/O memory mapping", device_get_nameunit(dev)); sc->sc_iomem.rm_descr = strdup(buffer, M_PUC); snprintf(buffer, sizeof(buffer), "%s port numbers", device_get_nameunit(dev)); sc->sc_irq.rm_descr = strdup(buffer, M_PUC); error = puc_config(sc, PUC_CFG_GET_NPORTS, 0, &res); KASSERT(error == 0, ("%s %d", __func__, __LINE__)); sc->sc_nports = (int)res; sc->sc_port = malloc(sc->sc_nports * sizeof(struct puc_port), M_PUC, M_WAITOK|M_ZERO); error = rman_manage_region(&sc->sc_irq, 1, sc->sc_nports); if (error) goto fail; error = puc_config(sc, PUC_CFG_SETUP, 0, &res); if (error) goto fail; for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; port->p_nr = idx + 1; error = puc_config(sc, PUC_CFG_GET_TYPE, idx, &res); if (error) goto fail; port->p_type = res; error = puc_config(sc, PUC_CFG_GET_RID, idx, &res); if (error) goto fail; bar = puc_get_bar(sc, res); if (bar == NULL) { error = ENXIO; goto fail; } port->p_bar = bar; start = rman_get_start(bar->b_res); error = puc_config(sc, PUC_CFG_GET_OFS, idx, &res); if (error) goto fail; ofs = res; error = puc_config(sc, PUC_CFG_GET_LEN, idx, &res); if (error) goto fail; size = res; rm = (bar->b_type == SYS_RES_IOPORT) ? 
&sc->sc_ioport: &sc->sc_iomem; port->p_rres = rman_reserve_resource(rm, start + ofs, start + ofs + size - 1, size, 0, NULL); if (port->p_rres != NULL) { bsh = rman_get_bushandle(bar->b_res); bst = rman_get_bustag(bar->b_res); bus_space_subregion(bst, bsh, ofs, size, &bsh); rman_set_bushandle(port->p_rres, bsh); rman_set_bustag(port->p_rres, bst); } port->p_ires = rman_reserve_resource(&sc->sc_irq, port->p_nr, port->p_nr, 1, 0, NULL); if (port->p_ires == NULL) { error = ENXIO; goto fail; } error = puc_config(sc, PUC_CFG_GET_CLOCK, idx, &res); if (error) goto fail; port->p_rclk = res; port->p_dev = device_add_child(dev, NULL, -1); if (port->p_dev != NULL) device_set_ivars(port->p_dev, (void *)port); } error = puc_config(sc, PUC_CFG_GET_ILR, 0, &res); if (error) goto fail; sc->sc_ilr = res; if (bootverbose && sc->sc_ilr != 0) device_printf(dev, "using interrupt latch register\n"); sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid, RF_ACTIVE|RF_SHAREABLE); if (sc->sc_ires != NULL) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY, puc_intr, NULL, sc, &sc->sc_icookie); if (error) error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)puc_intr, sc, &sc->sc_icookie); else sc->sc_fastintr = 1; if (error) { device_printf(dev, "could not activate interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); sc->sc_ires = NULL; } } if (sc->sc_ires == NULL) { /* XXX no interrupt resource. Force polled mode. */ sc->sc_polled = 1; } /* Probe and attach our children. */ for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; if (port->p_dev == NULL) continue; error = device_probe_and_attach(port->p_dev); if (error) { device_delete_child(dev, port->p_dev); port->p_dev = NULL; } } /* * If there are no serdev devices, then our interrupt handler * will do nothing. Tear it down. */ if (sc->sc_serdevs == 0UL) bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); return (0); fail: for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; if (port->p_dev != NULL) device_delete_child(dev, port->p_dev); if (port->p_rres != NULL) rman_release_resource(port->p_rres); if (port->p_ires != NULL) rman_release_resource(port->p_ires); } for (idx = 0; idx < PUC_PCI_BARS; idx++) { bar = &sc->sc_bar[idx]; if (bar->b_res != NULL) bus_release_resource(sc->sc_dev, bar->b_type, bar->b_rid, bar->b_res); } rman_fini(&sc->sc_irq); free(__DECONST(void *, sc->sc_irq.rm_descr), M_PUC); rman_fini(&sc->sc_iomem); free(__DECONST(void *, sc->sc_iomem.rm_descr), M_PUC); rman_fini(&sc->sc_ioport); free(__DECONST(void *, sc->sc_ioport.rm_descr), M_PUC); free(sc->sc_port, M_PUC); return (error); } int puc_bfe_detach(device_t dev) { struct puc_bar *bar; struct puc_port *port; struct puc_softc *sc; int error, idx; sc = device_get_softc(dev); /* Detach our children. 
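 * (These are the per-port child devices created with
 * device_add_child() in puc_bfe_attach() above.)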
*/ error = 0; for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; if (port->p_dev == NULL) continue; if (device_delete_child(dev, port->p_dev) == 0) { if (port->p_rres != NULL) rman_release_resource(port->p_rres); if (port->p_ires != NULL) rman_release_resource(port->p_ires); } else error = ENXIO; } if (error) return (error); if (sc->sc_serdevs != 0UL) bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); for (idx = 0; idx < PUC_PCI_BARS; idx++) { bar = &sc->sc_bar[idx]; if (bar->b_res != NULL) bus_release_resource(sc->sc_dev, bar->b_type, bar->b_rid, bar->b_res); } rman_fini(&sc->sc_irq); free(__DECONST(void *, sc->sc_irq.rm_descr), M_PUC); rman_fini(&sc->sc_iomem); free(__DECONST(void *, sc->sc_iomem.rm_descr), M_PUC); rman_fini(&sc->sc_ioport); free(__DECONST(void *, sc->sc_ioport.rm_descr), M_PUC); free(sc->sc_port, M_PUC); return (0); } int puc_bfe_probe(device_t dev, const struct puc_cfg *cfg) { struct puc_softc *sc; intptr_t res; int error; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_cfg = cfg; /* We don't attach to single-port serial cards. */ if (cfg->ports == PUC_PORT_1S || cfg->ports == PUC_PORT_1P) return (EDOOFUS); error = puc_config(sc, PUC_CFG_GET_NPORTS, 0, &res); if (error) return (error); error = puc_config(sc, PUC_CFG_GET_DESC, 0, &res); if (error) return (error); if (res != 0) device_set_desc(dev, (const char *)res); return (BUS_PROBE_DEFAULT); } struct resource * puc_bus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct puc_port *port; struct resource *res; device_t assigned, originator; int error; /* Get our immediate child. */ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (NULL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (rid == NULL || *rid != 0) return (NULL); /* We only support default allocations. */ if (!RMAN_IS_DEFAULT_RANGE(start, end)) return (NULL); if (type == port->p_bar->b_type) res = port->p_rres; else if (type == SYS_RES_IRQ) res = port->p_ires; else return (NULL); if (res == NULL) return (NULL); assigned = rman_get_device(res); if (assigned == NULL) /* Not allocated */ rman_set_device(res, originator); else if (assigned != originator) return (NULL); if (flags & RF_ACTIVE) { error = rman_activate_resource(res); if (error) { if (assigned == NULL) rman_set_device(res, NULL); return (NULL); } } return (res); } int puc_bus_release_resource(device_t dev, device_t child, int type, int rid, struct resource *res) { struct puc_port *port; device_t originator; /* Get our immediate child. 
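 * (As in puc_bus_alloc_resource() above: the requester may be a
 * descendant rather than a direct child, so walk up from the
 * originator until the device whose parent is this puc instance
 * is found.)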
*/ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (rid != 0 || res == NULL) return (EINVAL); if (type == port->p_bar->b_type) { if (res != port->p_rres) return (EINVAL); } else if (type == SYS_RES_IRQ) { if (res != port->p_ires) return (EINVAL); if (port->p_hasintr) return (EBUSY); } else return (EINVAL); if (rman_get_device(res) != originator) return (ENXIO); if (rman_get_flags(res) & RF_ACTIVE) rman_deactivate_resource(res); rman_set_device(res, NULL); return (0); } int puc_bus_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct puc_port *port; struct resource *res; rman_res_t start; /* Get our immediate child. */ while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (type == port->p_bar->b_type) res = port->p_rres; else if (type == SYS_RES_IRQ) res = port->p_ires; else return (ENXIO); if (rid != 0 || res == NULL) return (ENXIO); start = rman_get_start(res); if (startp != NULL) *startp = start; if (countp != NULL) *countp = rman_get_end(res) - start + 1; return (0); } int puc_bus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, void (*ihand)(void *), void *arg, void **cookiep) { struct puc_port *port; struct puc_softc *sc; device_t originator; int i, isrc, serdev; sc = device_get_softc(dev); /* Get our immediate child. */ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (cookiep == NULL || res != port->p_ires) return (EINVAL); /* We demand that serdev devices use filter_only interrupts. */ if (port->p_type == PUC_TYPE_SERIAL && ihand != NULL) return (ENXIO); if (rman_get_device(port->p_ires) != originator) return (ENXIO); /* * Have non-serdev ports handled by the bus implementation. It * supports multiple handlers for a single interrupt as it is, * so we wouldn't add value if we did it ourselves. */ serdev = 0; if (port->p_type == PUC_TYPE_SERIAL) { i = 0, isrc = SER_INT_OVERRUN; while (i < PUC_ISRCCNT) { port->p_ihsrc[i] = SERDEV_IHAND(originator, isrc); if (port->p_ihsrc[i] != NULL) serdev = 1; i++, isrc <<= 1; } } if (!serdev) return (BUS_SETUP_INTR(device_get_parent(dev), originator, sc->sc_ires, flags, filt, ihand, arg, cookiep)); sc->sc_serdevs |= 1UL << (port->p_nr - 1); port->p_hasintr = 1; port->p_iharg = arg; *cookiep = port; return (0); } int puc_bus_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { struct puc_port *port; struct puc_softc *sc; device_t originator; int i; sc = device_get_softc(dev); /* Get our immediate child. 
*/ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (res != port->p_ires) return (EINVAL); if (rman_get_device(port->p_ires) != originator) return (ENXIO); if (!port->p_hasintr) return (BUS_TEARDOWN_INTR(device_get_parent(dev), originator, sc->sc_ires, cookie)); if (cookie != port) return (EINVAL); port->p_hasintr = 0; port->p_iharg = NULL; for (i = 0; i < PUC_ISRCCNT; i++) port->p_ihsrc[i] = NULL; return (0); } int puc_bus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct puc_port *port; /* Get our immediate child. */ while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (result == NULL) return (EINVAL); switch(index) { case PUC_IVAR_CLOCK: *result = port->p_rclk; break; case PUC_IVAR_TYPE: *result = port->p_type; break; default: return (ENOENT); } return (0); } int puc_bus_print_child(device_t dev, device_t child) { struct puc_port *port; int retval; port = device_get_ivars(child); retval = 0; retval += bus_print_child_header(dev, child); retval += printf(" at port %d", port->p_nr); retval += bus_print_child_footer(dev, child); return (retval); } int -puc_bus_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) +puc_bus_child_location(device_t dev, device_t child, struct sbuf *sb) { struct puc_port *port; port = device_get_ivars(child); - snprintf(buf, buflen, "port=%d", port->p_nr); + sbuf_printf(sb, "port=%d", port->p_nr); return (0); } int -puc_bus_child_pnpinfo_str(device_t dev, device_t child, char *buf, - size_t buflen) +puc_bus_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb) { struct puc_port *port; port = device_get_ivars(child); - snprintf(buf, buflen, "type=%d", port->p_type); + sbuf_printf(sb, "type=%d", port->p_type); return (0); } diff --git a/sys/dev/puc/puc_bfe.h b/sys/dev/puc/puc_bfe.h index 68678d3771e1..067a18ba1e71 100644 --- a/sys/dev/puc/puc_bfe.h +++ b/sys/dev/puc/puc_bfe.h @@ -1,102 +1,102 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _DEV_PUC_BFE_H_ #define _DEV_PUC_BFE_H_ #define PUC_PCI_BARS 6 struct puc_cfg; struct puc_port; extern const struct puc_cfg puc_pci_devices[]; extern devclass_t puc_devclass; extern const char puc_driver_name[]; struct puc_bar { struct resource *b_res; int b_rid; int b_type; }; struct puc_softc { device_t sc_dev; const struct puc_cfg *sc_cfg; intptr_t sc_cfg_data; struct puc_bar sc_bar[PUC_PCI_BARS]; struct rman sc_ioport; struct rman sc_iomem; struct rman sc_irq; struct resource *sc_ires; void *sc_icookie; int sc_irid; int sc_nports; struct puc_port *sc_port; int sc_fastintr:1; int sc_leaving:1; int sc_polled:1; int sc_msi:1; int sc_ilr; /* * Bitmask of ports that use the serdev I/F. This allows for * 32 ports on ILP32 machines and 64 ports on LP64 machines. */ u_long sc_serdevs; }; struct puc_bar *puc_get_bar(struct puc_softc *sc, int rid); int puc_bfe_attach(device_t); int puc_bfe_detach(device_t); int puc_bfe_probe(device_t, const struct puc_cfg *); -int puc_bus_child_location_str(device_t, device_t, char *, size_t); -int puc_bus_child_pnpinfo_str(device_t, device_t, char *, size_t); +int puc_bus_child_location(device_t, device_t, struct sbuf *sb); +int puc_bus_child_pnpinfo(device_t, device_t, struct sbuf *sb); struct resource *puc_bus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); int puc_bus_get_resource(device_t, device_t, int, int, rman_res_t *, rman_res_t *); int puc_bus_print_child(device_t, device_t); int puc_bus_read_ivar(device_t, device_t, int, uintptr_t *); int puc_bus_release_resource(device_t, device_t, int, int, struct resource *); int puc_bus_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); int puc_bus_teardown_intr(device_t, device_t, struct resource *, void *); SYSCTL_DECL(_hw_puc); #endif /* _DEV_PUC_BFE_H_ */ diff --git a/sys/dev/puc/puc_pci.c b/sys/dev/puc/puc_pci.c index 3432f6d33b6b..270634f299f8 100644 --- a/sys/dev/puc/puc_pci.c +++ b/sys/dev/puc/puc_pci.c @@ -1,202 +1,202 @@ /* $NetBSD: puc.c,v 1.7 2000/07/29 17:43:38 jlam Exp $ */ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-3-Clause * * Copyright (c) 2002 JF Hay. All rights reserved. * Copyright (c) 2000 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 1996, 1998, 1999 * Christopher G. Demetriou. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Christopher G. Demetriou * for the NetBSD Project. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
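The puc conversion shown earlier is the pattern this whole commit repeats: bus_child_location and bus_child_pnpinfo methods now append key=value pairs to a caller-supplied struct sbuf via sbuf_printf() instead of formatting into a fixed char buffer with snprintf(). A minimal sketch of the consumer side, assuming the standard sbuf(9) API; the wrapper name and buffer handling are illustrative, not code from this commit:

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/sbuf.h>

/*
 * Sketch: wrap a fixed buffer in an sbuf, let the bus method append
 * its key=value pairs, then finish the sbuf.  Sizing and truncation
 * are handled once here instead of in every bus driver.
 */
static int
example_child_location(device_t bus, device_t child, char *buf, size_t len)
{
	struct sbuf sb;
	int error;

	sbuf_new(&sb, buf, len, SBUF_FIXEDLEN);
	BUS_CHILD_LOCATION(bus, child, &sb);
	error = sbuf_finish(&sb);	/* NUL-terminates, reports overflow */
	sbuf_delete(&sb);
	return (error);
}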
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int puc_msi_disable; SYSCTL_INT(_hw_puc, OID_AUTO, msi_disable, CTLFLAG_RDTUN, &puc_msi_disable, 0, "Disable use of MSI interrupts by puc(9)"); static const struct puc_cfg * puc_pci_match(device_t dev, const struct puc_cfg *desc) { uint16_t vendor, device; uint16_t subvendor, subdevice; vendor = pci_get_vendor(dev); device = pci_get_device(dev); subvendor = pci_get_subvendor(dev); subdevice = pci_get_subdevice(dev); while (desc->vendor != 0xffff) { if (desc->vendor == vendor && desc->device == device) { /* exact match */ if (desc->subvendor == subvendor && desc->subdevice == subdevice) return (desc); /* wildcard match */ if (desc->subvendor == 0xffff) return (desc); } desc++; } /* no match */ return (NULL); } static int puc_pci_probe(device_t dev) { const struct puc_cfg *desc; if ((pci_read_config(dev, PCIR_HDRTYPE, 1) & PCIM_HDRTYPE) != 0) return (ENXIO); desc = puc_pci_match(dev, puc_pci_devices); if (desc == NULL) return (ENXIO); return (puc_bfe_probe(dev, desc)); } static int puc_pci_attach(device_t dev) { struct puc_softc *sc; int error, count; sc = device_get_softc(dev); if (!puc_msi_disable) { count = 1; if (pci_alloc_msi(dev, &count) == 0) { sc->sc_msi = 1; sc->sc_irid = 1; } } error = puc_bfe_attach(dev); if (error != 0 && sc->sc_msi) pci_release_msi(dev); return (error); } static int puc_pci_detach(device_t dev) { struct puc_softc *sc; int error; sc = device_get_softc(dev); error = puc_bfe_detach(dev); if (error != 0) return (error); if (sc->sc_msi) error = pci_release_msi(dev); return (error); } static device_method_t puc_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, puc_pci_probe), DEVMETHOD(device_attach, puc_pci_attach), DEVMETHOD(device_detach, puc_pci_detach), DEVMETHOD(bus_alloc_resource, puc_bus_alloc_resource), DEVMETHOD(bus_release_resource, puc_bus_release_resource), DEVMETHOD(bus_get_resource, puc_bus_get_resource), DEVMETHOD(bus_read_ivar, puc_bus_read_ivar), DEVMETHOD(bus_setup_intr, puc_bus_setup_intr), DEVMETHOD(bus_teardown_intr, puc_bus_teardown_intr), DEVMETHOD(bus_print_child, puc_bus_print_child), - DEVMETHOD(bus_child_pnpinfo_str, puc_bus_child_pnpinfo_str), - DEVMETHOD(bus_child_location_str, puc_bus_child_location_str), + DEVMETHOD(bus_child_pnpinfo, puc_bus_child_pnpinfo), + DEVMETHOD(bus_child_location, puc_bus_child_location), DEVMETHOD_END }; static driver_t puc_pci_driver = { puc_driver_name, puc_pci_methods, sizeof(struct puc_softc), }; DRIVER_MODULE(puc, pci, puc_pci_driver, puc_devclass, 0, 0); MODULE_PNP_INFO("U16:vendor;U16:device;U16:#;U16:#;D:#", pci, puc, puc_pci_devices, nitems(puc_pci_devices) - 1); diff --git a/sys/dev/pwm/ofw_pwmbus.c b/sys/dev/pwm/ofw_pwmbus.c index 359c4f11272b..4a961c39e4d3 100644 --- a/sys/dev/pwm/ofw_pwmbus.c +++ b/sys/dev/pwm/ofw_pwmbus.c @@ -1,223 +1,223 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2019 Ian Lepore * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
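puc_pci_match above is a two-tier table lookup: within an entry whose vendor and device match, an exact subsystem match wins, and a subvendor of 0xffff acts as a wildcard; the table itself is terminated by a 0xffff vendor. The same logic as a self-contained sketch, with hypothetical types and names:

#include <stddef.h>
#include <stdint.h>

struct pci_id {
	uint16_t vendor, device, subvendor, subdevice;
};

/* Hypothetical table walk; 0xffff subvendor means "any subsystem". */
static const struct pci_id *
match_id(const struct pci_id *tbl, const struct pci_id *dev)
{
	for (; tbl->vendor != 0xffff; tbl++) {
		if (tbl->vendor != dev->vendor || tbl->device != dev->device)
			continue;
		if (tbl->subvendor == dev->subvendor &&
		    tbl->subdevice == dev->subdevice)
			return (tbl);		/* exact match */
		if (tbl->subvendor == 0xffff)
			return (tbl);		/* wildcard match */
	}
	return (NULL);				/* no match */
}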
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "pwmbus_if.h" struct ofw_pwmbus_ivars { struct pwmbus_ivars base; struct ofw_bus_devinfo devinfo; }; struct ofw_pwmbus_softc { struct pwmbus_softc base; }; /* * bus_if methods... */ static device_t ofw_pwmbus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct ofw_pwmbus_ivars *ivars; if ((ivars = malloc(sizeof(struct ofw_pwmbus_ivars), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { return (NULL); } if ((child = device_add_child_ordered(dev, order, name, unit)) == NULL) { free(ivars, M_DEVBUF); return (NULL); } ivars->devinfo.obd_node = -1; device_set_ivars(child, ivars); return (child); } static void ofw_pwmbus_child_deleted(device_t dev, device_t child) { struct ofw_pwmbus_ivars *ivars; ivars = device_get_ivars(child); if (ivars != NULL) { ofw_bus_gen_destroy_devinfo(&ivars->devinfo); free(ivars, M_DEVBUF); } } static const struct ofw_bus_devinfo * ofw_pwmbus_get_devinfo(device_t bus, device_t dev) { struct ofw_pwmbus_ivars *ivars; ivars = device_get_ivars(dev); return (&ivars->devinfo); } /* * device_if methods... */ static int ofw_pwmbus_probe(device_t dev) { if (ofw_bus_get_node(dev) == -1) { return (ENXIO); } device_set_desc(dev, "OFW PWM bus"); return (BUS_PROBE_DEFAULT); } static int ofw_pwmbus_attach(device_t dev) { struct ofw_pwmbus_softc *sc; struct ofw_pwmbus_ivars *ivars; phandle_t node; device_t child, parent; pcell_t chan; bool any_children; sc = device_get_softc(dev); sc->base.dev = dev; parent = device_get_parent(dev); if (PWMBUS_CHANNEL_COUNT(parent, &sc->base.nchannels) != 0 || sc->base.nchannels == 0) { device_printf(dev, "No channels on parent %s\n", device_get_nameunit(parent)); return (ENXIO); } /* * Attach the children found in the fdt node of the hardware controller. * Hardware controllers must implement the ofw_bus_get_node method so * that our call to ofw_bus_get_node() gets back the controller's node. */ any_children = false; node = ofw_bus_get_node(dev); for (node = OF_child(node); node != 0; node = OF_peer(node)) { /* * The child has to have a reg property; its value is the * channel number so range-check it. */ if (OF_getencprop(node, "reg", &chan, sizeof(chan)) == -1) continue; if (chan >= sc->base.nchannels) continue; if ((child = ofw_pwmbus_add_child(dev, 0, NULL, -1)) == NULL) continue; ivars = device_get_ivars(child); ivars->base.pi_channel = chan; /* Set up the standard ofw devinfo. 
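The ofw_pwmbus attach path above walks the controller's FDT node and instantiates one child per subnode that carries an in-range reg (channel) property. The enumeration idiom, reduced to a kernel-context sketch (real OF_*/ofw_bus calls as used above, error handling and ivar setup trimmed):

#include <sys/param.h>
#include <sys/bus.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>

/* Sketch: one child device per FDT subnode with a valid "reg". */
static void
enumerate_channels(device_t dev, u_int nchannels)
{
	phandle_t node;
	pcell_t chan;

	for (node = OF_child(ofw_bus_get_node(dev)); node != 0;
	    node = OF_peer(node)) {
		if (OF_getencprop(node, "reg", &chan, sizeof(chan)) == -1)
			continue;	/* no channel number: skip */
		if (chan >= nchannels)
			continue;	/* out of range: skip */
		/* add child, set ivars, attach devinfo as above ... */
	}
}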
*/ if (ofw_bus_gen_setup_devinfo(&ivars->devinfo, node) != 0) { device_delete_child(dev, child); continue; } any_children = true; } /* * If we didn't find any children in the fdt data, add a pwmc(4) child * for each channel, like the base pwmbus does. The idea is that if * there is any fdt data, then we do exactly what it says and nothing * more, otherwise we just provide generic userland access to all the * pwm channels that exist like the base pwmbus's attach code does. */ if (!any_children) { for (chan = 0; chan < sc->base.nchannels; ++chan) { child = ofw_pwmbus_add_child(dev, 0, "pwmc", -1); if (child == NULL) { device_printf(dev, "failed to add pwmc child " " device for channel %u\n", chan); continue; } ivars = device_get_ivars(child); ivars->base.pi_channel = chan; } } bus_enumerate_hinted_children(dev); bus_generic_probe(dev); return (bus_generic_attach(dev)); } static device_method_t ofw_pwmbus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofw_pwmbus_probe), DEVMETHOD(device_attach, ofw_pwmbus_attach), /* Bus interface */ - DEVMETHOD(bus_child_pnpinfo_str, ofw_bus_gen_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_add_child, ofw_pwmbus_add_child), DEVMETHOD(bus_child_deleted, ofw_pwmbus_child_deleted), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, ofw_pwmbus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; devclass_t ofw_pwmbus_devclass; DEFINE_CLASS_1(pwmbus, ofw_pwmbus_driver, ofw_pwmbus_methods, sizeof(struct pwmbus_softc), pwmbus_driver); EARLY_DRIVER_MODULE(ofw_pwmbus, pwm, ofw_pwmbus_driver, ofw_pwmbus_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(ofw_pwmbus, 1); MODULE_DEPEND(ofw_pwmbus, pwmbus, 1, 1, 1); diff --git a/sys/dev/pwm/pwmbus.c b/sys/dev/pwm/pwmbus.c index cacd21c07709..4c818074f68c 100644 --- a/sys/dev/pwm/pwmbus.c +++ b/sys/dev/pwm/pwmbus.c @@ -1,292 +1,284 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2018 Emmanuel Vadot * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include #include #include #include #include #include #include #include +#include #include #include "pwmbus_if.h" /* * bus_if methods... */ static device_t pwmbus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct pwmbus_ivars *ivars; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); ivars = malloc(sizeof(struct pwmbus_ivars), M_DEVBUF, M_NOWAIT | M_ZERO); if (ivars == NULL) { device_delete_child(dev, child); return (NULL); } device_set_ivars(child, ivars); return (child); } static int -pwmbus_child_location_str(device_t dev, device_t child, char *buf, size_t blen) +pwmbus_child_location(device_t dev, device_t child, struct sbuf *sb) { struct pwmbus_ivars *ivars; ivars = device_get_ivars(child); - snprintf(buf, blen, "hwdev=%s channel=%u", + sbuf_printf(sb, "hwdev=%s channel=%u", device_get_nameunit(device_get_parent(dev)), ivars->pi_channel); return (0); } -static int -pwmbus_child_pnpinfo_str(device_t dev, device_t child, char *buf, - size_t buflen) -{ - *buf = '\0'; - return (0); -} - static void pwmbus_hinted_child(device_t dev, const char *dname, int dunit) { struct pwmbus_ivars *ivars; device_t child; child = pwmbus_add_child(dev, 0, dname, dunit); /* * If there is a channel hint, use it. Otherwise pi_channel was * initialized to zero, so that's the channel we'll use. */ ivars = device_get_ivars(child); resource_int_value(dname, dunit, "channel", &ivars->pi_channel); } static int pwmbus_print_child(device_t dev, device_t child) { struct pwmbus_ivars *ivars; int rv; ivars = device_get_ivars(child); rv = bus_print_child_header(dev, child); rv += printf(" channel %u", ivars->pi_channel); rv += bus_print_child_footer(dev, child); return (rv); } static void pwmbus_probe_nomatch(device_t dev, device_t child) { struct pwmbus_ivars *ivars; ivars = device_get_ivars(child); if (ivars != NULL) device_printf(dev, " on channel %u\n", ivars->pi_channel); return; } static int pwmbus_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct pwmbus_ivars *ivars; ivars = device_get_ivars(child); switch (which) { case PWMBUS_IVAR_CHANNEL: *(u_int *)result = ivars->pi_channel; break; default: return (EINVAL); } return (0); } /* * device_if methods... */ static int pwmbus_probe(device_t dev) { device_set_desc(dev, "PWM bus"); return (BUS_PROBE_GENERIC); } static int pwmbus_attach(device_t dev) { struct pwmbus_softc *sc; struct pwmbus_ivars *ivars; device_t child, parent; u_int chan; sc = device_get_softc(dev); sc->dev = dev; parent = device_get_parent(dev); if (PWMBUS_CHANNEL_COUNT(parent, &sc->nchannels) != 0 || sc->nchannels == 0) { device_printf(sc->dev, "No channels on parent %s\n", device_get_nameunit(parent)); return (ENXIO); } /* Add a pwmc(4) child for each channel. */ for (chan = 0; chan < sc->nchannels; ++chan) { if ((child = pwmbus_add_child(sc->dev, 0, "pwmc", -1)) == NULL) { device_printf(dev, "failed to add pwmc child device " "for channel %u\n", chan); continue; } ivars = device_get_ivars(child); ivars->pi_channel = chan; } bus_enumerate_hinted_children(dev); bus_generic_probe(dev); return (bus_generic_attach(dev)); } static int pwmbus_detach(device_t dev) { int rv; if ((rv = bus_generic_detach(dev)) == 0) rv = device_delete_children(dev); return (rv); } /* * pwmbus_if methods... 
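Note the asymmetry in the pwmbus hunks above: pwmbus_child_location_str is converted to the sbuf form, but pwmbus_child_pnpinfo_str, whose only job was to write an empty string, is deleted outright. With the sbuf interface a bus with no pnpinfo simply leaves the method unimplemented and the default appends nothing, eliminating boilerplate of this shape (the removed code, kept here for contrast):

/*
 * Old string interface: every bus had to at least emit "".
 * The sbuf interface makes this method unnecessary.
 */
static int
old_style_empty_pnpinfo(device_t dev, device_t child, char *buf,
    size_t buflen)
{
	*buf = '\0';
	return (0);
}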
*/ static int pwmbus_channel_config(device_t dev, u_int chan, u_int period, u_int duty) { return (PWMBUS_CHANNEL_CONFIG(device_get_parent(dev), chan, period, duty)); } static int pwmbus_channel_get_config(device_t dev, u_int chan, u_int *period, u_int *duty) { return (PWMBUS_CHANNEL_GET_CONFIG(device_get_parent(dev), chan, period, duty)); } static int pwmbus_channel_get_flags(device_t dev, u_int chan, uint32_t *flags) { return (PWMBUS_CHANNEL_GET_FLAGS(device_get_parent(dev), chan, flags)); } static int pwmbus_channel_enable(device_t dev, u_int chan, bool enable) { return (PWMBUS_CHANNEL_ENABLE(device_get_parent(dev), chan, enable)); } static int pwmbus_channel_set_flags(device_t dev, u_int chan, uint32_t flags) { return (PWMBUS_CHANNEL_SET_FLAGS(device_get_parent(dev), chan, flags)); } static int pwmbus_channel_is_enabled(device_t dev, u_int chan, bool *enable) { return (PWMBUS_CHANNEL_IS_ENABLED(device_get_parent(dev), chan, enable)); } static int pwmbus_channel_count(device_t dev, u_int *nchannel) { return (PWMBUS_CHANNEL_COUNT(device_get_parent(dev), nchannel)); } static device_method_t pwmbus_methods[] = { /* device_if */ DEVMETHOD(device_probe, pwmbus_probe), DEVMETHOD(device_attach, pwmbus_attach), DEVMETHOD(device_detach, pwmbus_detach), /* bus_if */ DEVMETHOD(bus_add_child, pwmbus_add_child), - DEVMETHOD(bus_child_location_str, pwmbus_child_location_str), - DEVMETHOD(bus_child_pnpinfo_str, pwmbus_child_pnpinfo_str), + DEVMETHOD(bus_child_location, pwmbus_child_location), DEVMETHOD(bus_hinted_child, pwmbus_hinted_child), DEVMETHOD(bus_print_child, pwmbus_print_child), DEVMETHOD(bus_probe_nomatch, pwmbus_probe_nomatch), DEVMETHOD(bus_read_ivar, pwmbus_read_ivar), /* pwmbus_if */ DEVMETHOD(pwmbus_channel_count, pwmbus_channel_count), DEVMETHOD(pwmbus_channel_config, pwmbus_channel_config), DEVMETHOD(pwmbus_channel_get_config, pwmbus_channel_get_config), DEVMETHOD(pwmbus_channel_set_flags, pwmbus_channel_set_flags), DEVMETHOD(pwmbus_channel_get_flags, pwmbus_channel_get_flags), DEVMETHOD(pwmbus_channel_enable, pwmbus_channel_enable), DEVMETHOD(pwmbus_channel_is_enabled, pwmbus_channel_is_enabled), DEVMETHOD_END }; driver_t pwmbus_driver = { "pwmbus", pwmbus_methods, sizeof(struct pwmbus_softc), }; devclass_t pwmbus_devclass; EARLY_DRIVER_MODULE(pwmbus, pwm, pwmbus_driver, pwmbus_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(pwmbus, 1); diff --git a/sys/dev/siis/siis.c b/sys/dev/siis/siis.c index 1c0000e89b07..316a9c6b61eb 100644 --- a/sys/dev/siis/siis.c +++ b/sys/dev/siis/siis.c @@ -1,1988 +1,1988 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
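Each pwmbus_if implementation above is a one-line relay: the bus resolves device_get_parent(dev) and re-invokes the same interface method on the hardware driver underneath, so pwmc(4) children never talk to the controller directly. The pattern in sketch form (PWMBUS_CHANNEL_EXAMPLE is a hypothetical method name, not part of the interface):

/* Sketch: a bus method that only forwards to its parent driver. */
static int
pwmbus_channel_example(device_t dev, u_int chan, u_int arg)
{
	return (PWMBUS_CHANNEL_EXAMPLE(device_get_parent(dev), chan, arg));
}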
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include "siis.h" #include #include #include #include #include /* local prototypes */ static int siis_setup_interrupt(device_t dev); static void siis_intr(void *data); static int siis_suspend(device_t dev); static int siis_resume(device_t dev); static int siis_ch_init(device_t dev); static int siis_ch_deinit(device_t dev); static int siis_ch_suspend(device_t dev); static int siis_ch_resume(device_t dev); static void siis_ch_intr_locked(void *data); static void siis_ch_intr(void *data); static void siis_ch_led(void *priv, int onoff); static void siis_begin_transaction(device_t dev, union ccb *ccb); static void siis_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void siis_execute_transaction(struct siis_slot *slot); static void siis_timeout(void *arg); static void siis_end_transaction(struct siis_slot *slot, enum siis_err_type et); static int siis_setup_fis(device_t dev, struct siis_cmd *ctp, union ccb *ccb, int tag); static void siis_dmainit(device_t dev); static void siis_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void siis_dmafini(device_t dev); static void siis_slotsalloc(device_t dev); static void siis_slotsfree(device_t dev); static void siis_reset(device_t dev); static void siis_portinit(device_t dev); static int siis_wait_ready(device_t dev, int t); static int siis_sata_connect(struct siis_channel *ch); static void siis_issue_recovery(device_t dev); static void siis_process_read_log(device_t dev, union ccb *ccb); static void siis_process_request_sense(device_t dev, union ccb *ccb); static void siisaction(struct cam_sim *sim, union ccb *ccb); static void siispoll(struct cam_sim *sim); static MALLOC_DEFINE(M_SIIS, "SIIS driver", "SIIS driver data buffers"); static struct { uint32_t id; const char *name; int ports; int quirks; #define SIIS_Q_SNTF 1 #define SIIS_Q_NOMSI 2 } siis_ids[] = { {0x31241095, "SiI3124", 4, 0}, {0x31248086, "SiI3124", 4, 0}, {0x31321095, "SiI3132", 2, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0x02421095, "SiI3132", 2, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0x02441095, "SiI3132", 2, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0x31311095, "SiI3131", 1, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0x35311095, "SiI3531", 1, SIIS_Q_SNTF|SIIS_Q_NOMSI}, {0, NULL, 0, 0} }; #define recovery_type spriv_field0 #define RECOVERY_NONE 0 #define RECOVERY_READ_LOG 1 #define RECOVERY_REQUEST_SENSE 2 #define recovery_slot spriv_field1 static int siis_probe(device_t dev) { char buf[64]; int i; uint32_t devid = pci_get_devid(dev); for (i = 0; siis_ids[i].id != 0; i++) { if (siis_ids[i].id == devid) { snprintf(buf, sizeof(buf), "%s SATA controller", siis_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int siis_attach(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); uint32_t devid = 
pci_get_devid(dev); device_t child; int error, i, unit; ctlr->dev = dev; for (i = 0; siis_ids[i].id != 0; i++) { if (siis_ids[i].id == devid) break; } ctlr->quirks = siis_ids[i].quirks; /* Global memory */ ctlr->r_grid = PCIR_BAR(0); if (!(ctlr->r_gmem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_grid, RF_ACTIVE))) return (ENXIO); ctlr->gctl = ATA_INL(ctlr->r_gmem, SIIS_GCTL); /* Channels memory */ ctlr->r_rid = PCIR_BAR(2); if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE))) return (ENXIO); /* Setup our own memory management for channels. */ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_grid, ctlr->r_gmem); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_grid, ctlr->r_gmem); rman_fini(&ctlr->sc_iomem); return (error); } pci_enable_busmaster(dev); /* Reset controller */ siis_resume(dev); /* Number of HW channels */ ctlr->channels = siis_ids[i].ports; /* Setup interrupts. */ if (siis_setup_interrupt(dev)) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_grid, ctlr->r_gmem); rman_fini(&ctlr->sc_iomem); return ENXIO; } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "siisch", -1); if (child == NULL) device_printf(dev, "failed to add channel device\n"); else device_set_ivars(child, (void *)(intptr_t)unit); } bus_generic_attach(dev); return 0; } static int siis_detach(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); /* Detach & delete all children */ device_delete_children(dev); /* Free interrupts. */ if (ctlr->irq.r_irq) { bus_teardown_intr(dev, ctlr->irq.r_irq, ctlr->irq.handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); } pci_release_msi(dev); /* Free memory. */ rman_fini(&ctlr->sc_iomem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_grid, ctlr->r_gmem); return (0); } static int siis_suspend(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); bus_generic_suspend(dev); /* Put controller into reset state. */ ctlr->gctl |= SIIS_GCTL_GRESET; ATA_OUTL(ctlr->r_gmem, SIIS_GCTL, ctlr->gctl); return 0; } static int siis_resume(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); /* Set PCIe max read request size to at least 1024 bytes */ if (pci_get_max_read_req(dev) < 1024) pci_set_max_read_req(dev, 1024); /* Put controller into reset state. */ ctlr->gctl |= SIIS_GCTL_GRESET; ATA_OUTL(ctlr->r_gmem, SIIS_GCTL, ctlr->gctl); DELAY(10000); /* Get controller out of reset state and enable port interrupts. */ ctlr->gctl &= ~(SIIS_GCTL_GRESET | SIIS_GCTL_I2C_IE); ctlr->gctl |= 0x0000000f; ATA_OUTL(ctlr->r_gmem, SIIS_GCTL, ctlr->gctl); return (bus_generic_resume(dev)); } static int siis_setup_interrupt(device_t dev) { struct siis_controller *ctlr = device_get_softc(dev); int msi = ctlr->quirks & SIIS_Q_NOMSI ? 0 : 1; /* Process hints. 
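siis_attach above claims BAR(2) once and then re-exports it through a private rman, so that siis_alloc_resource (later in this file) can hand each channel child a 0x2000-byte window at offset unit << 13. The setup half of that arrangement, sketched with local names and without the unwind paths:

#include <sys/param.h>
#include <sys/rman.h>

/*
 * Sketch: publish a parent BAR as an rman region so children can
 * reserve fixed-size slices of it with rman_reserve_resource().
 */
static int
publish_bar_region(struct rman *rm, struct resource *bar)
{
	int error;

	rm->rm_start = rman_get_start(bar);
	rm->rm_end = rman_get_end(bar);
	rm->rm_type = RMAN_ARRAY;
	rm->rm_descr = "I/O memory addresses";
	if ((error = rman_init(rm)) != 0)
		return (error);
	return (rman_manage_region(rm, rm->rm_start, rm->rm_end));
}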
*/ resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &msi); if (msi < 0) msi = 0; else if (msi > 0) msi = min(1, pci_msi_count(dev)); /* Allocate MSI if needed/present. */ if (msi && pci_alloc_msi(dev, &msi) != 0) msi = 0; /* Allocate all IRQs. */ ctlr->irq.r_irq_rid = msi ? 1 : 0; if (!(ctlr->irq.r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irq.r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return ENXIO; } if ((bus_setup_intr(dev, ctlr->irq.r_irq, ATA_INTR_FLAGS, NULL, siis_intr, ctlr, &ctlr->irq.handle))) { /* SOS XXX release r_irq */ device_printf(dev, "unable to setup interrupt\n"); return ENXIO; } return (0); } /* * Common case interrupt handler. */ static void siis_intr(void *data) { struct siis_controller *ctlr = (struct siis_controller *)data; u_int32_t is; void *arg; int unit; is = ATA_INL(ctlr->r_gmem, SIIS_IS); for (unit = 0; unit < ctlr->channels; unit++) { if ((is & SIIS_IS_PORT(unit)) != 0 && (arg = ctlr->interrupt[unit].argument)) { ctlr->interrupt[unit].function(arg); } } /* Acknowledge interrupt, if MSI enabled. */ if (ctlr->irq.r_irq_rid) { ATA_OUTL(ctlr->r_gmem, SIIS_GCTL, ctlr->gctl | SIIS_GCTL_MSIACK); } } static struct resource * siis_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct siis_controller *ctlr = device_get_softc(dev); int unit = ((struct siis_channel *)device_get_softc(child))->unit; struct resource *res = NULL; int offset = unit << 13; rman_res_t st; switch (type) { case SYS_RES_MEMORY: st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + 0x2000, 0x2000, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, 0x2000, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irq.r_irq; break; } return (res); } static int siis_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { switch (type) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: if (rid != ATA_IRQ_RID) return ENOENT; return (0); } return (EINVAL); } static int siis_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct siis_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); if (filter != NULL) { printf("siis.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } static int siis_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct siis_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } static int siis_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at channel %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } static int -siis_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) +siis_child_location(device_t dev, device_t child, struct sbuf *sb) { - snprintf(buf, 
buflen, "channel=%d", + sbuf_printf(sb, "channel=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static bus_dma_tag_t siis_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } devclass_t siis_devclass; static device_method_t siis_methods[] = { DEVMETHOD(device_probe, siis_probe), DEVMETHOD(device_attach, siis_attach), DEVMETHOD(device_detach, siis_detach), DEVMETHOD(device_suspend, siis_suspend), DEVMETHOD(device_resume, siis_resume), DEVMETHOD(bus_print_child, siis_print_child), DEVMETHOD(bus_alloc_resource, siis_alloc_resource), DEVMETHOD(bus_release_resource, siis_release_resource), DEVMETHOD(bus_setup_intr, siis_setup_intr), DEVMETHOD(bus_teardown_intr,siis_teardown_intr), - DEVMETHOD(bus_child_location_str, siis_child_location_str), + DEVMETHOD(bus_child_location, siis_child_location), DEVMETHOD(bus_get_dma_tag, siis_get_dma_tag), { 0, 0 } }; static driver_t siis_driver = { "siis", siis_methods, sizeof(struct siis_controller) }; DRIVER_MODULE(siis, pci, siis_driver, siis_devclass, 0, 0); MODULE_VERSION(siis, 1); MODULE_DEPEND(siis, cam, 1, 1, 1); static int siis_ch_probe(device_t dev) { device_set_desc_copy(dev, "SIIS channel"); return (BUS_PROBE_DEFAULT); } static int siis_ch_attach(device_t dev) { struct siis_controller *ctlr = device_get_softc(device_get_parent(dev)); struct siis_channel *ch = device_get_softc(dev); struct cam_devq *devq; int rid, error, i, sata_rev = 0; ch->dev = dev; ch->unit = (intptr_t)device_get_ivars(dev); ch->quirks = ctlr->quirks; ch->pm_level = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "pm_level", &ch->pm_level); resource_int_value(device_get_name(dev), device_get_unit(dev), "sata_rev", &sata_rev); for (i = 0; i < 16; i++) { ch->user[i].revision = sata_rev; ch->user[i].mode = 0; ch->user[i].bytecount = 8192; ch->user[i].tags = SIIS_MAX_SLOTS; ch->curr[i] = ch->user[i]; if (ch->pm_level) ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ; ch->user[i].caps |= CTS_SATA_CAPS_H_AN; } mtx_init(&ch->mtx, "SIIS channel lock", NULL, MTX_DEF); rid = ch->unit; if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE))) return (ENXIO); siis_dmainit(dev); siis_slotsalloc(dev); siis_ch_init(dev); mtx_lock(&ch->mtx); rid = ATA_IRQ_RID; if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "Unable to map interrupt\n"); error = ENXIO; goto err0; } if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, siis_ch_intr_locked, dev, &ch->ih))) { device_printf(dev, "Unable to setup interrupt\n"); error = ENXIO; goto err1; } /* Create the device queue for our SIM. 
*/ devq = cam_simq_alloc(SIIS_MAX_SLOTS); if (devq == NULL) { device_printf(dev, "Unable to allocate simq\n"); error = ENOMEM; goto err1; } /* Construct SIM entry */ ch->sim = cam_sim_alloc(siisaction, siispoll, "siisch", ch, device_get_unit(dev), &ch->mtx, 2, SIIS_MAX_SLOTS, devq); if (ch->sim == NULL) { cam_simq_free(devq); device_printf(dev, "unable to allocate sim\n"); error = ENOMEM; goto err1; } if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "unable to register xpt bus\n"); error = ENXIO; goto err2; } if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "unable to create path\n"); error = ENXIO; goto err3; } mtx_unlock(&ch->mtx); ch->led = led_create(siis_ch_led, dev, device_get_nameunit(dev)); return (0); err3: xpt_bus_deregister(cam_sim_path(ch->sim)); err2: cam_sim_free(ch->sim, /*free_devq*/TRUE); err1: bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); err0: bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_unlock(&ch->mtx); mtx_destroy(&ch->mtx); return (error); } static int siis_ch_detach(device_t dev) { struct siis_channel *ch = device_get_softc(dev); led_destroy(ch->led); mtx_lock(&ch->mtx); xpt_async(AC_LOST_DEVICE, ch->path, NULL); xpt_free_path(ch->path); xpt_bus_deregister(cam_sim_path(ch->sim)); cam_sim_free(ch->sim, /*free_devq*/TRUE); mtx_unlock(&ch->mtx); bus_teardown_intr(dev, ch->r_irq, ch->ih); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); siis_ch_deinit(dev); siis_slotsfree(dev); siis_dmafini(dev); bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_destroy(&ch->mtx); return (0); } static int siis_ch_init(device_t dev) { struct siis_channel *ch = device_get_softc(dev); /* Get port out of reset state. */ ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PORT_RESET); ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_32BIT); if (ch->pm_present) ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PME); else ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PME); /* Enable port interrupts */ ATA_OUTL(ch->r_mem, SIIS_P_IESET, SIIS_P_IX_ENABLED); return (0); } static int siis_ch_deinit(device_t dev) { struct siis_channel *ch = device_get_softc(dev); /* Put port into reset state. 
*/ ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PORT_RESET); return (0); } static int siis_ch_suspend(device_t dev) { struct siis_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_freeze_simq(ch->sim, 1); while (ch->oslots) msleep(ch, &ch->mtx, PRIBIO, "siissusp", hz/100); siis_ch_deinit(dev); mtx_unlock(&ch->mtx); return (0); } static int siis_ch_resume(device_t dev) { struct siis_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); siis_ch_init(dev); siis_reset(dev); xpt_release_simq(ch->sim, TRUE); mtx_unlock(&ch->mtx); return (0); } devclass_t siisch_devclass; static device_method_t siisch_methods[] = { DEVMETHOD(device_probe, siis_ch_probe), DEVMETHOD(device_attach, siis_ch_attach), DEVMETHOD(device_detach, siis_ch_detach), DEVMETHOD(device_suspend, siis_ch_suspend), DEVMETHOD(device_resume, siis_ch_resume), { 0, 0 } }; static driver_t siisch_driver = { "siisch", siisch_methods, sizeof(struct siis_channel) }; DRIVER_MODULE(siisch, siis, siisch_driver, siis_devclass, 0, 0); static void siis_ch_led(void *priv, int onoff) { device_t dev; struct siis_channel *ch; dev = (device_t)priv; ch = device_get_softc(dev); if (onoff == 0) ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_LED_ON); else ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_LED_ON); } struct siis_dc_cb_args { bus_addr_t maddr; int error; }; static void siis_dmainit(device_t dev) { struct siis_channel *ch = device_get_softc(dev); struct siis_dc_cb_args dcba; /* Command area. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, SIIS_WORK_SIZE, 1, SIIS_WORK_SIZE, 0, NULL, NULL, &ch->dma.work_tag)) goto error; if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, 0, &ch->dma.work_map)) goto error; if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work, SIIS_WORK_SIZE, siis_dmasetupc_cb, &dcba, 0) || dcba.error) { bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); goto error; } ch->dma.work_bus = dcba.maddr; /* Data area. 
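siis_dmainit above follows the canonical busdma recipe for a command/work area: bus_dma_tag_create(), bus_dmamem_alloc(), then bus_dmamap_load() with a callback that records the single segment's bus address (siis_dmasetupc_cb). The callback contract, restated as a sketch with illustrative names:

#include <machine/bus.h>

struct load_cb_args {
	bus_addr_t maddr;	/* filled in by the callback */
	int error;
};

/*
 * Sketch: for a non-deferred load of a single-segment work area,
 * bus_dmamap_load() invokes this synchronously; capturing ds_addr
 * here is how the driver learns the area's bus address.
 */
static void
load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct load_cb_args *a = xsc;

	if ((a->error = error) == 0)
		a->maddr = segs[0].ds_addr;
}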
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, SIIS_SG_ENTRIES * PAGE_SIZE, SIIS_SG_ENTRIES, 0xFFFFFFFF, 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) { goto error; } return; error: device_printf(dev, "WARNING - DMA initialization failed\n"); siis_dmafini(dev); } static void siis_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct siis_dc_cb_args *dcba = (struct siis_dc_cb_args *)xsc; if (!(dcba->error = error)) dcba->maddr = segs[0].ds_addr; } static void siis_dmafini(device_t dev) { struct siis_channel *ch = device_get_softc(dev); if (ch->dma.data_tag) { bus_dma_tag_destroy(ch->dma.data_tag); ch->dma.data_tag = NULL; } if (ch->dma.work_bus) { bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map); bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); ch->dma.work_bus = 0; ch->dma.work_map = NULL; ch->dma.work = NULL; } if (ch->dma.work_tag) { bus_dma_tag_destroy(ch->dma.work_tag); ch->dma.work_tag = NULL; } } static void siis_slotsalloc(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; /* Alloc and setup command/dma slots */ bzero(ch->slot, sizeof(ch->slot)); for (i = 0; i < SIIS_MAX_SLOTS; i++) { struct siis_slot *slot = &ch->slot[i]; slot->dev = dev; slot->slot = i; slot->state = SIIS_SLOT_EMPTY; slot->prb_offset = SIIS_PRB_SIZE * i; slot->ccb = NULL; callout_init_mtx(&slot->timeout, &ch->mtx, 0); if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map)) device_printf(ch->dev, "FAILURE - create data_map\n"); } } static void siis_slotsfree(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; /* Free all dma slots */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { struct siis_slot *slot = &ch->slot[i]; callout_drain(&slot->timeout); if (slot->dma.data_map) { bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map); slot->dma.data_map = NULL; } } } static void siis_notify_events(device_t dev) { struct siis_channel *ch = device_get_softc(dev); struct cam_path *dpath; u_int32_t status; int i; if (ch->quirks & SIIS_Q_SNTF) { status = ATA_INL(ch->r_mem, SIIS_P_SNTF); ATA_OUTL(ch->r_mem, SIIS_P_SNTF, status); } else { /* * Without SNTF we have no idea which device sent notification. * If PMP is connected, assume it, else - device. */ status = (ch->pm_present) ? 
0x8000 : 0x0001; } if (bootverbose) device_printf(dev, "SNTF 0x%04x\n", status); for (i = 0; i < 16; i++) { if ((status & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, NULL, xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) { xpt_async(AC_SCSI_AEN, dpath, NULL); xpt_free_path(dpath); } } } static void siis_phy_check_events(device_t dev) { struct siis_channel *ch = device_get_softc(dev); /* If we have a connection event, deal with it */ if (ch->pm_level == 0) { u_int32_t status = ATA_INL(ch->r_mem, SIIS_P_SSTS); union ccb *ccb; if (bootverbose) { if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) { device_printf(dev, "CONNECT requested\n"); } else device_printf(dev, "DISCONNECT requested\n"); } siis_reset(dev); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } } static void siis_ch_intr_locked(void *data) { device_t dev = (device_t)data; struct siis_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); siis_ch_intr(data); mtx_unlock(&ch->mtx); } static void siis_ch_intr(void *data) { device_t dev = (device_t)data; struct siis_channel *ch = device_get_softc(dev); uint32_t istatus, sstatus, ctx, estatus, ok, err = 0; enum siis_err_type et; int i, ccs, port, tslots; mtx_assert(&ch->mtx, MA_OWNED); /* Read command statuses. */ sstatus = ATA_INL(ch->r_mem, SIIS_P_SS); ok = ch->rslots & ~sstatus; /* Complete all successful commands. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { if ((ok >> i) & 1) siis_end_transaction(&ch->slot[i], SIIS_ERR_NONE); } /* Do we have any other events? */ if ((sstatus & SIIS_P_SS_ATTN) == 0) return; /* Read and clear interrupt statuses. */ istatus = ATA_INL(ch->r_mem, SIIS_P_IS) & (0xFFFF & ~SIIS_P_IX_COMMCOMP); ATA_OUTL(ch->r_mem, SIIS_P_IS, istatus); /* Process PHY events */ if (istatus & SIIS_P_IX_PHYRDYCHG) siis_phy_check_events(dev); /* Process NOTIFY events */ if (istatus & SIIS_P_IX_SDBN) siis_notify_events(dev); /* Process command errors */ if (istatus & SIIS_P_IX_COMMERR) { estatus = ATA_INL(ch->r_mem, SIIS_P_CMDERR); ctx = ATA_INL(ch->r_mem, SIIS_P_CTX); ccs = (ctx & SIIS_P_CTX_SLOT) >> SIIS_P_CTX_SLOT_SHIFT; port = (ctx & SIIS_P_CTX_PMP) >> SIIS_P_CTX_PMP_SHIFT; err = ch->rslots & sstatus; //device_printf(dev, "%s ERROR ss %08x is %08x rs %08x es %d act %d port %d serr %08x\n", // __func__, sstatus, istatus, ch->rslots, estatus, ccs, port, // ATA_INL(ch->r_mem, SIIS_P_SERR)); if (!ch->recoverycmd && !ch->recovery) { xpt_freeze_simq(ch->sim, ch->numrslots); ch->recovery = 1; } if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status &= ~CAM_STATUS_MASK; fccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(fccb); } if (estatus == SIIS_P_CMDERR_DEV || estatus == SIIS_P_CMDERR_SDB || estatus == SIIS_P_CMDERR_DATAFIS) { tslots = ch->numtslots[port]; for (i = 0; i < SIIS_MAX_SLOTS; i++) { /* XXX: requests in loading state. */ if (((ch->rslots >> i) & 1) == 0) continue; if (ch->slot[i].ccb->ccb_h.target_id != port) continue; if (tslots == 0) { /* Untagged operation. */ if (i == ccs) et = SIIS_ERR_TFE; else et = SIIS_ERR_INNOCENT; } else { /* Tagged operation. 
*/ et = SIIS_ERR_NCQ; } siis_end_transaction(&ch->slot[i], et); } /* * We can't reinit port if there are some other * commands active, use resume to complete them. */ if (ch->rslots != 0 && !ch->recoverycmd) ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_RESUME); } else { if (estatus == SIIS_P_CMDERR_SENDFIS || estatus == SIIS_P_CMDERR_INCSTATE || estatus == SIIS_P_CMDERR_PPE || estatus == SIIS_P_CMDERR_SERVICE) { et = SIIS_ERR_SATA; } else et = SIIS_ERR_INVALID; for (i = 0; i < SIIS_MAX_SLOTS; i++) { /* XXX: requests in loading state. */ if (((ch->rslots >> i) & 1) == 0) continue; siis_end_transaction(&ch->slot[i], et); } } } } /* Must be called with channel locked. */ static int siis_check_collision(device_t dev, union ccb *ccb) { struct siis_channel *ch = device_get_softc(dev); mtx_assert(&ch->mtx, MA_OWNED); if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { /* Tagged command while we have no supported tag free. */ if (((~ch->oslots) & (0x7fffffff >> (31 - ch->curr[ccb->ccb_h.target_id].tags))) == 0) return (1); } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) { /* Atomic command while anything active. */ if (ch->numrslots != 0) return (1); } /* We have some atomic command running. */ if (ch->aslots != 0) return (1); return (0); } /* Must be called with channel locked. */ static void siis_begin_transaction(device_t dev, union ccb *ccb) { struct siis_channel *ch = device_get_softc(dev); struct siis_slot *slot; int tag, tags; mtx_assert(&ch->mtx, MA_OWNED); /* Choose empty slot. */ tags = SIIS_MAX_SLOTS; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) tags = ch->curr[ccb->ccb_h.target_id].tags; tag = fls((~ch->oslots) & (0x7fffffff >> (31 - tags))) - 1; /* Occupy chosen slot. */ slot = &ch->slot[tag]; slot->ccb = ccb; /* Update channel stats. */ ch->oslots |= (1 << slot->slot); ch->numrslots++; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots[ccb->ccb_h.target_id]++; } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) ch->aslots |= (1 << slot->slot); slot->dma.nsegs = 0; /* If request moves data, setup and load SG list */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { slot->state = SIIS_SLOT_LOADING; bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, siis_dmasetprd, slot, 0); } else siis_execute_transaction(slot); } /* Locked by busdma engine. 
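Slot bookkeeping in siis_begin_transaction above is pure bitmask arithmetic: oslots marks occupied slots, and a free tag is picked as the highest clear bit below the per-target tag limit using fls(). A standalone demonstration, compilable in FreeBSD userland with fls(3) from <strings.h>:

#include <assert.h>
#include <stdio.h>
#include <strings.h>	/* fls(3) */

int
main(void)
{
	unsigned oslots = 0x00000007;	/* slots 0..2 busy */
	int tags = 31;			/* per-target tag limit */
	int tag;

	/* Highest free slot under the limit, as in siis_begin_transaction(). */
	tag = fls(~oslots & (0x7fffffff >> (31 - tags))) - 1;
	assert(tag == 30);
	printf("allocated slot %d\n", tag);
	return (0);
}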
*/ static void siis_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct siis_slot *slot = arg; struct siis_channel *ch = device_get_softc(slot->dev); struct siis_cmd *ctp; struct siis_dma_prd *prd; int i; mtx_assert(&ch->mtx, MA_OWNED); if (error) { device_printf(slot->dev, "DMA load error\n"); if (!ch->recoverycmd) xpt_freeze_simq(ch->sim, 1); siis_end_transaction(slot, SIIS_ERR_INVALID); return; } KASSERT(nsegs <= SIIS_SG_ENTRIES, ("too many DMA segment entries\n")); slot->dma.nsegs = nsegs; if (nsegs != 0) { /* Get a piece of the workspace for this request */ ctp = (struct siis_cmd *)(ch->dma.work + slot->prb_offset); /* Fill S/G table */ if (slot->ccb->ccb_h.func_code == XPT_ATA_IO) prd = &ctp->u.ata.prd[0]; else prd = &ctp->u.atapi.prd[0]; for (i = 0; i < nsegs; i++) { prd[i].dba = htole64(segs[i].ds_addr); prd[i].dbc = htole32(segs[i].ds_len); prd[i].control = 0; } prd[nsegs - 1].control = htole32(SIIS_PRD_TRM); bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); } siis_execute_transaction(slot); } /* Must be called with channel locked. */ static void siis_execute_transaction(struct siis_slot *slot) { device_t dev = slot->dev; struct siis_channel *ch = device_get_softc(dev); struct siis_cmd *ctp; union ccb *ccb = slot->ccb; u_int64_t prb_bus; mtx_assert(&ch->mtx, MA_OWNED); /* Get a piece of the workspace for this request */ ctp = (struct siis_cmd *)(ch->dma.work + slot->prb_offset); ctp->control = 0; ctp->protocol_override = 0; ctp->transfer_count = 0; /* Special handling for Soft Reset command. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { if (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) { ctp->control |= htole16(SIIS_PRB_SOFT_RESET); } else { ctp->control |= htole16(SIIS_PRB_PROTOCOL_OVERRIDE); if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { ctp->protocol_override |= htole16(SIIS_PRB_PROTO_NCQ); } if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { ctp->protocol_override |= htole16(SIIS_PRB_PROTO_READ); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { ctp->protocol_override |= htole16(SIIS_PRB_PROTO_WRITE); } } } else if (ccb->ccb_h.func_code == XPT_SCSI_IO) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) ctp->control |= htole16(SIIS_PRB_PACKET_READ); else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) ctp->control |= htole16(SIIS_PRB_PACKET_WRITE); } /* Special handling for Soft Reset command. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET)) { /* Kick controller into sane state */ siis_portinit(dev); } /* Setup the FIS for this request */ if (!siis_setup_fis(dev, ctp, ccb, slot->slot)) { device_printf(ch->dev, "Setting up SATA FIS failed\n"); if (!ch->recoverycmd) xpt_freeze_simq(ch->sim, 1); siis_end_transaction(slot, SIIS_ERR_INVALID); return; } bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_PREWRITE); /* Issue command to the controller. */ slot->state = SIIS_SLOT_RUNNING; ch->rslots |= (1 << slot->slot); prb_bus = ch->dma.work_bus + slot->prb_offset; ATA_OUTL(ch->r_mem, SIIS_P_CACTL(slot->slot), prb_bus); ATA_OUTL(ch->r_mem, SIIS_P_CACTH(slot->slot), prb_bus >> 32); /* Start command execution timeout */ callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout, 0, siis_timeout, slot, 0); return; } /* Must be called with channel locked. 
*/ static void siis_process_timeout(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; mtx_assert(&ch->mtx, MA_OWNED); if (!ch->recoverycmd && !ch->recovery) { xpt_freeze_simq(ch->sim, ch->numrslots); ch->recovery = 1; } /* Handle the rest of commands. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < SIIS_SLOT_RUNNING) continue; siis_end_transaction(&ch->slot[i], SIIS_ERR_TIMEOUT); } } /* Must be called with channel locked. */ static void siis_rearm_timeout(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; mtx_assert(&ch->mtx, MA_OWNED); for (i = 0; i < SIIS_MAX_SLOTS; i++) { struct siis_slot *slot = &ch->slot[i]; /* Do we have a running request on slot? */ if (slot->state < SIIS_SLOT_RUNNING) continue; if ((ch->toslots & (1 << i)) == 0) continue; callout_reset_sbt(&slot->timeout, SBT_1MS * slot->ccb->ccb_h.timeout, 0, siis_timeout, slot, 0); } } /* Locked by callout mechanism. */ static void siis_timeout(void *arg) { struct siis_slot *slot = arg; device_t dev = slot->dev; struct siis_channel *ch = device_get_softc(dev); union ccb *ccb = slot->ccb; mtx_assert(&ch->mtx, MA_OWNED); /* Check for stale timeout. */ if (slot->state < SIIS_SLOT_RUNNING) return; /* Handle soft-reset timeouts without doing hard-reset. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET)) { xpt_freeze_simq(ch->sim, ch->numrslots); siis_end_transaction(slot, SIIS_ERR_TFE); return; } device_printf(dev, "Timeout on slot %d\n", slot->slot); device_printf(dev, "%s is %08x ss %08x rs %08x es %08x sts %08x serr %08x\n", __func__, ATA_INL(ch->r_mem, SIIS_P_IS), ATA_INL(ch->r_mem, SIIS_P_SS), ch->rslots, ATA_INL(ch->r_mem, SIIS_P_CMDERR), ATA_INL(ch->r_mem, SIIS_P_STS), ATA_INL(ch->r_mem, SIIS_P_SERR)); if (ch->toslots == 0) xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); if ((ch->rslots & ~ch->toslots) == 0) siis_process_timeout(dev); else device_printf(dev, " ... waiting for slots %08x\n", ch->rslots & ~ch->toslots); } /* Must be called with channel locked. */ static void siis_end_transaction(struct siis_slot *slot, enum siis_err_type et) { device_t dev = slot->dev; struct siis_channel *ch = device_get_softc(dev); union ccb *ccb = slot->ccb; int lastto; mtx_assert(&ch->mtx, MA_OWNED); bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_POSTWRITE); /* Read result registers to the result struct * May be incorrect if several commands finished same time, * so read only when sure or have to. 
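Each command slot above carries its own callout: timeouts are armed per command when it is issued and re-armed wholesale in siis_rearm_timeout once a stuck command freezes the port. The arming idiom, in sketch form (real callout(9) calls, illustrative wrapper name; the callout is assumed to have been set up with callout_init_mtx so the handler runs with the channel lock held):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>

/* Sketch: arm a per-slot timeout expressed in milliseconds. */
static void
arm_slot_timeout(struct callout *c, u_int timeout_ms,
    void (*fn)(void *), void *arg)
{
	callout_reset_sbt(c, SBT_1MS * timeout_ms, 0, fn, arg, 0);
}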
*/ if (ccb->ccb_h.func_code == XPT_ATA_IO) { struct ata_res *res = &ccb->ataio.res; if ((et == SIIS_ERR_TFE) || (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) { int offs = SIIS_P_LRAM_SLOT(slot->slot) + 8; res->status = ATA_INB(ch->r_mem, offs + 2); res->error = ATA_INB(ch->r_mem, offs + 3); res->lba_low = ATA_INB(ch->r_mem, offs + 4); res->lba_mid = ATA_INB(ch->r_mem, offs + 5); res->lba_high = ATA_INB(ch->r_mem, offs + 6); res->device = ATA_INB(ch->r_mem, offs + 7); res->lba_low_exp = ATA_INB(ch->r_mem, offs + 8); res->lba_mid_exp = ATA_INB(ch->r_mem, offs + 9); res->lba_high_exp = ATA_INB(ch->r_mem, offs + 10); res->sector_count = ATA_INB(ch->r_mem, offs + 12); res->sector_count_exp = ATA_INB(ch->r_mem, offs + 13); } else bzero(res, sizeof(*res)); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && ch->numrslots == 1) { ccb->ataio.resid = ccb->ataio.dxfer_len - ATA_INL(ch->r_mem, SIIS_P_LRAM_SLOT(slot->slot) + 4); } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && ch->numrslots == 1) { ccb->csio.resid = ccb->csio.dxfer_len - ATA_INL(ch->r_mem, SIIS_P_LRAM_SLOT(slot->slot) + 4); } } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, (ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map); } /* Set proper result status. */ if (et != SIIS_ERR_NONE || ch->recovery) { ch->eslots |= (1 << slot->slot); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } /* In case of error, freeze device for proper recovery. */ if (et != SIIS_ERR_NONE && (!ch->recoverycmd) && !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (et) { case SIIS_ERR_NONE: ccb->ccb_h.status |= CAM_REQ_CMP; if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.scsi_status = SCSI_STATUS_OK; break; case SIIS_ERR_INVALID: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_REQ_INVALID; break; case SIIS_ERR_INNOCENT: ccb->ccb_h.status |= CAM_REQUEUE_REQ; break; case SIIS_ERR_TFE: case SIIS_ERR_NCQ: if (ccb->ccb_h.func_code == XPT_SCSI_IO) { ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } else { ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; } break; case SIIS_ERR_SATA: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_UNCOR_PARITY; break; case SIIS_ERR_TIMEOUT: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_CMD_TIMEOUT; break; default: ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } /* Free slot. */ ch->oslots &= ~(1 << slot->slot); ch->rslots &= ~(1 << slot->slot); ch->aslots &= ~(1 << slot->slot); slot->state = SIIS_SLOT_EMPTY; slot->ccb = NULL; /* Update channel stats. */ ch->numrslots--; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots[ccb->ccb_h.target_id]--; } /* Cancel timeout state if request completed normally. */ if (et != SIIS_ERR_TIMEOUT) { lastto = (ch->toslots == (1 << slot->slot)); ch->toslots &= ~(1 << slot->slot); if (lastto) xpt_release_simq(ch->sim, TRUE); } /* If it was our READ LOG command - process it. */ if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) { siis_process_read_log(dev, ccb); /* If it was our REQUEST SENSE command - process it. */ } else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) { siis_process_request_sense(dev, ccb); /* If it was NCQ or ATAPI command error, put result on hold. 
*/ } else if (et == SIIS_ERR_NCQ || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) { ch->hold[slot->slot] = ccb; ch->numhslots++; } else xpt_done(ccb); /* If we have no other active commands, ... */ if (ch->rslots == 0) { /* if there were timeouts or fatal error - reset port. */ if (ch->toslots != 0 || ch->fatalerr) { siis_reset(dev); } else { /* if we have slots in error, we can reinit port. */ if (ch->eslots != 0) siis_portinit(dev); /* if there commands on hold, we can do recovery. */ if (!ch->recoverycmd && ch->numhslots) siis_issue_recovery(dev); } /* If all the reset of commands are in timeout - abort them. */ } else if ((ch->rslots & ~ch->toslots) == 0 && et != SIIS_ERR_TIMEOUT) siis_rearm_timeout(dev); /* Unfreeze frozen command. */ if (ch->frozen && !siis_check_collision(dev, ch->frozen)) { union ccb *fccb = ch->frozen; ch->frozen = NULL; siis_begin_transaction(dev, fccb); xpt_release_simq(ch->sim, TRUE); } } static void siis_issue_recovery(device_t dev) { struct siis_channel *ch = device_get_softc(dev); union ccb *ccb; struct ccb_ataio *ataio; struct ccb_scsiio *csio; int i; /* Find some held command. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (ch->hold[i]) break; } if (i == SIIS_MAX_SLOTS) return; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { device_printf(dev, "Unable to allocate recovery command\n"); completeall: /* We can't do anything -- complete held commands. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (ch->hold[i] == NULL) continue; ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } siis_reset(dev); return; } ccb->ccb_h = ch->hold[i]->ccb_h; /* Reuse old header. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { /* READ LOG */ ccb->ccb_h.recovery_type = RECOVERY_READ_LOG; ccb->ccb_h.func_code = XPT_ATA_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ ataio = &ccb->ataio; ataio->data_ptr = malloc(512, M_SIIS, M_NOWAIT); if (ataio->data_ptr == NULL) { xpt_free_ccb(ccb); device_printf(dev, "Unable to allocate memory for READ LOG command\n"); goto completeall; } ataio->dxfer_len = 512; bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = CAM_ATAIO_48BIT; ataio->cmd.command = 0x2F; /* READ LOG EXT */ ataio->cmd.sector_count = 1; ataio->cmd.sector_count_exp = 0; ataio->cmd.lba_low = 0x10; ataio->cmd.lba_mid = 0; ataio->cmd.lba_mid_exp = 0; } else { /* REQUEST SENSE */ ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE; ccb->ccb_h.recovery_slot = i; ccb->ccb_h.func_code = XPT_SCSI_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.status = 0; ccb->ccb_h.timeout = 1000; /* 1s should be enough. 
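After an NCQ error, siis_issue_recovery above holds the failed CCBs and issues READ LOG EXT (command 0x2F) for log page 10h; per the ATA NCQ command error log layout, byte 0 carries the failing tag in its low five bits (bit 7 set means the error was non-queued), and the following bytes mirror the shadow register block, which siis_process_read_log copies back into the held request. A parsing sketch using that layout (hypothetical struct, offsets as consumed by the driver):

#include <stdint.h>

struct ncq_error {
	int tag;		/* -1 if the error was non-queued */
	uint8_t status, error;
};

/* Sketch: decode a 512-byte NCQ command error log (page 10h). */
static struct ncq_error
parse_ncq_log(const uint8_t data[512])
{
	struct ncq_error e = { -1, 0, 0 };

	if ((data[0] & 0x80) == 0) {	/* bit 7 clear: queued error valid */
		e.tag = data[0] & 0x1f;
		e.status = data[2];
		e.error = data[3];
	}
	return (e);
}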
*/ csio = &ccb->csio; csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data; csio->dxfer_len = ch->hold[i]->csio.sense_len; csio->cdb_len = 6; bzero(&csio->cdb_io, sizeof(csio->cdb_io)); csio->cdb_io.cdb_bytes[0] = 0x03; csio->cdb_io.cdb_bytes[4] = csio->dxfer_len; } ch->recoverycmd = 1; siis_begin_transaction(dev, ccb); } static void siis_process_read_log(device_t dev, union ccb *ccb) { struct siis_channel *ch = device_get_softc(dev); uint8_t *data; struct ata_res *res; int i; ch->recoverycmd = 0; data = ccb->ataio.data_ptr; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (data[0] & 0x80) == 0) { for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.target_id != ccb->ccb_h.target_id) continue; if ((data[0] & 0x1F) == i) { res = &ch->hold[i]->ataio.res; res->status = data[2]; res->error = data[3]; res->lba_low = data[4]; res->lba_mid = data[5]; res->lba_high = data[6]; res->device = data[7]; res->lba_low_exp = data[8]; res->lba_mid_exp = data[9]; res->lba_high_exp = data[10]; res->sector_count = data[12]; res->sector_count_exp = data[13]; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; } xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } else { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) device_printf(dev, "Error while READ LOG EXT\n"); else if ((data[0] & 0x80) == 0) { device_printf(dev, "Non-queued command error in READ LOG EXT\n"); } for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.target_id != ccb->ccb_h.target_id) continue; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } free(ccb->ataio.data_ptr, M_SIIS); xpt_free_ccb(ccb); } static void siis_process_request_sense(device_t dev, union ccb *ccb) { struct siis_channel *ch = device_get_softc(dev); int i; ch->recoverycmd = 0; i = ccb->ccb_h.recovery_slot; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; } xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; xpt_free_ccb(ccb); } static void siis_portinit(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i; ch->eslots = 0; ch->recovery = 0; ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_RESUME); for (i = 0; i < 16; i++) { ATA_OUTL(ch->r_mem, SIIS_P_PMPSTS(i), 0); ATA_OUTL(ch->r_mem, SIIS_P_PMPQACT(i), 0); } ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PORT_INIT); siis_wait_ready(dev, 1000); } static int siis_devreset(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int timeout = 0; uint32_t val; ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_DEV_RESET); while (((val = ATA_INL(ch->r_mem, SIIS_P_STS)) & SIIS_P_CTL_DEV_RESET) != 0) { DELAY(100); if (timeout++ > 1000) { device_printf(dev, "device reset stuck " "(timeout 100ms) status = %08x\n", val); return (EBUSY); } } return (0); } static int siis_wait_ready(device_t dev, int t) { struct siis_channel *ch = device_get_softc(dev); int timeout = 0; uint32_t val; while (((val = ATA_INL(ch->r_mem, SIIS_P_STS)) & SIIS_P_CTL_READY) == 0) { DELAY(1000); if (timeout++ > t) { device_printf(dev, "port is not ready (timeout %dms) " "status = %08x\n", t, val); return (EBUSY); } } return (0); } static void siis_reset(device_t dev) { struct siis_channel *ch = device_get_softc(dev); int i, retry = 0, sata_rev; uint32_t val; xpt_freeze_simq(ch->sim, 1); if (bootverbose)
device_printf(dev, "SIIS reset...\n"); if (!ch->recoverycmd && !ch->recovery) xpt_freeze_simq(ch->sim, ch->numrslots); /* Requeue frozen command. */ if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status &= ~CAM_STATUS_MASK; fccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(fccb); } /* Requeue all running commands. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < SIIS_SLOT_RUNNING) continue; /* XXX; Commands in loading state. */ siis_end_transaction(&ch->slot[i], SIIS_ERR_INNOCENT); } /* Finish all held commands as-is. */ for (i = 0; i < SIIS_MAX_SLOTS; i++) { if (!ch->hold[i]) continue; xpt_done(ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } if (ch->toslots != 0) xpt_release_simq(ch->sim, TRUE); ch->eslots = 0; ch->recovery = 0; ch->toslots = 0; ch->fatalerr = 0; /* Disable port interrupts */ ATA_OUTL(ch->r_mem, SIIS_P_IECLR, 0x0000FFFF); /* Set speed limit. */ sata_rev = ch->user[ch->pm_present ? 15 : 0].revision; if (sata_rev == 1) val = ATA_SC_SPD_SPEED_GEN1; else if (sata_rev == 2) val = ATA_SC_SPD_SPEED_GEN2; else if (sata_rev == 3) val = ATA_SC_SPD_SPEED_GEN3; else val = 0; ATA_OUTL(ch->r_mem, SIIS_P_SCTL, ATA_SC_DET_IDLE | val | ((ch->pm_level > 0) ? 0 : (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER))); retry: siis_devreset(dev); /* Reset and reconnect PHY, */ if (!siis_sata_connect(ch)) { ch->devices = 0; /* Enable port interrupts */ ATA_OUTL(ch->r_mem, SIIS_P_IESET, SIIS_P_IX_ENABLED); if (bootverbose) device_printf(dev, "SIIS reset done: phy reset found no device\n"); /* Tell the XPT about the event */ xpt_async(AC_BUS_RESET, ch->path, NULL); xpt_release_simq(ch->sim, TRUE); return; } /* Wait for port ready status. */ if (siis_wait_ready(dev, 1000)) { device_printf(dev, "port ready timeout\n"); if (!retry) { device_printf(dev, "trying full port reset ...\n"); /* Get port to the reset state. */ ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PORT_RESET); DELAY(10000); /* Get port out of reset state. */ ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PORT_RESET); ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_32BIT); if (ch->pm_present) ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PME); else ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PME); siis_wait_ready(dev, 5000); retry = 1; goto retry; } } ch->devices = 1; /* Enable port interrupts */ ATA_OUTL(ch->r_mem, SIIS_P_IS, 0xFFFFFFFF); ATA_OUTL(ch->r_mem, SIIS_P_IESET, SIIS_P_IX_ENABLED); if (bootverbose) device_printf(dev, "SIIS reset done: devices=%08x\n", ch->devices); /* Tell the XPT about the event */ xpt_async(AC_BUS_RESET, ch->path, NULL); xpt_release_simq(ch->sim, TRUE); } static int siis_setup_fis(device_t dev, struct siis_cmd *ctp, union ccb *ccb, int tag) { struct siis_channel *ch = device_get_softc(dev); u_int8_t *fis = &ctp->fis[0]; bzero(fis, 24); fis[0] = 0x27; /* host to device */ fis[1] = (ccb->ccb_h.target_id & 0x0f); if (ccb->ccb_h.func_code == XPT_SCSI_IO) { fis[1] |= 0x80; fis[2] = ATA_PACKET_CMD; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) fis[3] = ATA_F_DMA; else { fis[5] = ccb->csio.dxfer_len; fis[6] = ccb->csio.dxfer_len >> 8; } fis[7] = ATA_D_LBA; fis[15] = ATA_A_4BIT; bzero(ctp->u.atapi.ccb, 16); bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, ctp->u.atapi.ccb, ccb->csio.cdb_len); } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) { fis[1] |= 0x80; fis[2] = ccb->ataio.cmd.command; fis[3] = ccb->ataio.cmd.features; fis[4] = ccb->ataio.cmd.lba_low; fis[5] = ccb->ataio.cmd.lba_mid; fis[6] = ccb->ataio.cmd.lba_high; fis[7] = ccb->ataio.cmd.device; fis[8] = ccb->ataio.cmd.lba_low_exp; fis[9] = ccb->ataio.cmd.lba_mid_exp; fis[10] = ccb->ataio.cmd.lba_high_exp; fis[11] = ccb->ataio.cmd.features_exp; fis[12] = ccb->ataio.cmd.sector_count; if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { fis[12] &= 0x07; fis[12] |= tag << 3; } fis[13] = ccb->ataio.cmd.sector_count_exp; if (ccb->ataio.ata_flags & ATA_FLAG_ICC) fis[14] = ccb->ataio.icc; fis[15] = ATA_A_4BIT; if (ccb->ataio.ata_flags & ATA_FLAG_AUX) { fis[16] = ccb->ataio.aux & 0xff; fis[17] = (ccb->ataio.aux >> 8) & 0xff; fis[18] = (ccb->ataio.aux >> 16) & 0xff; fis[19] = (ccb->ataio.aux >> 24) & 0xff; } } else { /* Soft reset. */ } return (20); } static int siis_sata_connect(struct siis_channel *ch) { u_int32_t status; int timeout, found = 0; /* Wait up to 100ms for "connect well" */ for (timeout = 0; timeout < 1000 ; timeout++) { status = ATA_INL(ch->r_mem, SIIS_P_SSTS); if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) found = 1; if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) break; if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) { if (bootverbose) { device_printf(ch->dev, "SATA offline status=%08x\n", status); } return (0); } if (found == 0 && timeout >= 100) break; DELAY(100); } if (timeout >= 1000 || !found) { if (bootverbose) { device_printf(ch->dev, "SATA connect timeout time=%dus status=%08x\n", timeout * 100, status); } return (0); } if (bootverbose) { device_printf(ch->dev, "SATA connect time=%dus status=%08x\n", timeout * 100, status); } /* Clear SATA error register */ ATA_OUTL(ch->r_mem, SIIS_P_SERR, 0xffffffff); return (1); } static int siis_check_ids(device_t dev, union ccb *ccb) { if (ccb->ccb_h.target_id > 15) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return (-1); } if (ccb->ccb_h.target_lun != 0) { ccb->ccb_h.status = CAM_LUN_INVALID; xpt_done(ccb); return (-1); } return (0); } static void siisaction(struct cam_sim *sim, union ccb *ccb) { device_t dev, parent; struct siis_channel *ch; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("siisaction func_code=%x\n", ccb->ccb_h.func_code)); ch = (struct siis_channel *)cam_sim_softc(sim); dev = ch->dev; mtx_assert(&ch->mtx, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_ATA_IO: /* Execute the requested I/O operation */ case XPT_SCSI_IO: if (siis_check_ids(dev, ccb)) return; if (ch->devices == 0 || (ch->pm_present == 0 && ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; } ccb->ccb_h.recovery_type = RECOVERY_NONE; /* Check for command collision. */ if (siis_check_collision(dev, ccb)) { /* Freeze command. */ ch->frozen = ccb; /* We have only one frozen slot, so freeze simq also. 
*/ xpt_freeze_simq(ch->sim, 1); return; } siis_begin_transaction(dev, ccb); return; case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct siis_device *d; if (siis_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) d->revision = cts->xport_specific.sata.revision; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) d->mode = cts->xport_specific.sata.mode; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) d->bytecount = min(8192, cts->xport_specific.sata.bytecount); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) d->tags = min(SIIS_MAX_SLOTS, cts->xport_specific.sata.tags); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM) { ch->pm_present = cts->xport_specific.sata.pm_present; if (ch->pm_present) ATA_OUTL(ch->r_mem, SIIS_P_CTLSET, SIIS_P_CTL_PME); else ATA_OUTL(ch->r_mem, SIIS_P_CTLCLR, SIIS_P_CTL_PME); } if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) d->atapi = cts->xport_specific.sata.atapi; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) d->caps = cts->xport_specific.sata.caps; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct siis_device *d; uint32_t status; if (siis_check_ids(dev, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; cts->protocol = PROTO_UNSPECIFIED; cts->protocol_version = PROTO_VERSION_UNSPECIFIED; cts->transport = XPORT_SATA; cts->transport_version = XPORT_VERSION_UNSPECIFIED; cts->proto_specific.valid = 0; cts->xport_specific.sata.valid = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (ccb->ccb_h.target_id == 15 || (ccb->ccb_h.target_id == 0 && !ch->pm_present))) { status = ATA_INL(ch->r_mem, SIIS_P_SSTS) & ATA_SS_SPD_MASK; if (status & 0x0f0) { cts->xport_specific.sata.revision = (status & 0x0f0) >> 4; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; } cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; if (ch->pm_level) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN; cts->xport_specific.sata.caps &= ch->user[ccb->ccb_h.target_id].caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } else { cts->xport_specific.sata.revision = d->revision; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; cts->xport_specific.sata.caps = d->caps; if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (ch->quirks & SIIS_Q_SNTF) == 0) cts->xport_specific.sata.caps &= ~CTS_SATA_CAPS_H_AN; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } cts->xport_specific.sata.mode = d->mode; cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; cts->xport_specific.sata.bytecount = d->bytecount; cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; cts->xport_specific.sata.pm_present = ch->pm_present; cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM; cts->xport_specific.sata.tags = d->tags; cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS; cts->xport_specific.sata.atapi = d->atapi; cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus
*/ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ siis_reset(dev); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; parent = device_get_parent(dev); cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; cpi->hba_inquiry |= PI_SATAPM; cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED | PIM_ATA_EXT; cpi->hba_eng_cnt = 0; cpi->max_target = 15; cpi->max_lun = 0; cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "SIIS", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SATA; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->maxio = maxphys; cpi->hba_vendor = pci_get_vendor(parent); cpi->hba_device = pci_get_device(parent); cpi->hba_subvendor = pci_get_subvendor(parent); cpi->hba_subdevice = pci_get_subdevice(parent); cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } static void siispoll(struct cam_sim *sim) { struct siis_channel *ch = (struct siis_channel *)cam_sim_softc(sim); siis_ch_intr(ch->dev); } diff --git a/sys/dev/smbus/smbus.c b/sys/dev/smbus/smbus.c index d6b4d9ab78b6..3de5cbc2b1f8 100644 --- a/sys/dev/smbus/smbus.c +++ b/sys/dev/smbus/smbus.c @@ -1,247 +1,245 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1998, 2001 Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <sys/param.h> #include <sys/systm.h> +#include <sys/sbuf.h> #include #include #include -#include +#include #include <dev/smbus/smbconf.h> #include <dev/smbus/smbus.h> #include "smbus_if.h" #include "bus_if.h" struct smbus_ivar { uint8_t addr; }; /* * Autoconfiguration and support routines for System Management bus */ static int smbus_probe(device_t dev) { device_set_desc(dev, "System Management Bus"); return (0); } static int smbus_attach(device_t dev) { struct smbus_softc *sc = device_get_softc(dev); mtx_init(&sc->lock, device_get_nameunit(dev), "smbus", MTX_DEF); bus_generic_probe(dev); bus_enumerate_hinted_children(dev); bus_generic_attach(dev); return (0); } static int smbus_detach(device_t dev) { struct smbus_softc *sc = device_get_softc(dev); int error; error = bus_generic_detach(dev); if (error) return (error); device_delete_children(dev); mtx_destroy(&sc->lock); return (0); } void smbus_generic_intr(device_t dev, u_char devaddr, char low, char high, int err) { } static device_t smbus_add_child(device_t dev, u_int order, const char *name, int unit) { struct smbus_ivar *devi; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); devi = malloc(sizeof(struct smbus_ivar), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (NULL); } device_set_ivars(child, devi); return (child); } static void smbus_hinted_child(device_t bus, const char *dname, int dunit) { struct smbus_ivar *devi; device_t child; int addr; addr = 0; resource_int_value(dname, dunit, "addr", &addr); if (addr > UINT8_MAX) { device_printf(bus, "ignored incorrect slave address hint 0x%x" " for %s%d\n", addr, dname, dunit); return; } child = BUS_ADD_CHILD(bus, SMBUS_ORDER_HINTED, dname, dunit); if (child == NULL) return; devi = device_get_ivars(child); devi->addr = addr; } static int -smbus_child_location_str(device_t parent, device_t child, char *buf, - size_t buflen) +smbus_child_location(device_t parent, device_t child, struct sbuf *sb) { struct smbus_ivar *devi; devi = device_get_ivars(child); if (devi->addr != 0) - snprintf(buf, buflen, "addr=0x%x", devi->addr); - else if (buflen) - buf[0] = 0; + sbuf_printf(sb, "addr=0x%x", devi->addr); return (0); } static int smbus_print_child(device_t parent, device_t child) { struct smbus_ivar *devi; int retval; devi = device_get_ivars(child); retval = bus_print_child_header(parent, child); if (devi->addr != 0) retval += printf(" at addr 0x%x", devi->addr); retval += bus_print_child_footer(parent, child); return (retval); } static int smbus_read_ivar(device_t parent, device_t child, int which, uintptr_t *result) { struct smbus_ivar *devi; devi = device_get_ivars(child); switch (which) { case SMBUS_IVAR_ADDR: if (devi->addr != 0) *result = devi->addr; else *result = -1; break; default: return (ENOENT); } return (0); } static int smbus_write_ivar(device_t parent, device_t child, int which, uintptr_t value) { struct smbus_ivar *devi; devi = device_get_ivars(child); switch (which) { case SMBUS_IVAR_ADDR: /* Allow the slave address to be set, but not changed. */ if (devi->addr != 0) return (EINVAL); devi->addr = value; break; default: return (ENOENT); } return (0); } static void smbus_probe_nomatch(device_t bus, device_t child) { struct smbus_ivar *devi = device_get_ivars(child); /* * Ignore (self-identified) devices without a slave address set. * For example, smb(4).
*/ if (devi->addr != 0) device_printf(bus, " at addr %#x\n", devi->addr); } /* * Device methods */ static device_method_t smbus_methods[] = { /* device interface */ DEVMETHOD(device_probe, smbus_probe), DEVMETHOD(device_attach, smbus_attach), DEVMETHOD(device_detach, smbus_detach), /* bus interface */ DEVMETHOD(bus_add_child, smbus_add_child), DEVMETHOD(bus_hinted_child, smbus_hinted_child), DEVMETHOD(bus_probe_nomatch, smbus_probe_nomatch), - DEVMETHOD(bus_child_location_str, smbus_child_location_str), + DEVMETHOD(bus_child_location, smbus_child_location), DEVMETHOD(bus_print_child, smbus_print_child), DEVMETHOD(bus_read_ivar, smbus_read_ivar), DEVMETHOD(bus_write_ivar, smbus_write_ivar), DEVMETHOD_END }; driver_t smbus_driver = { "smbus", smbus_methods, sizeof(struct smbus_softc), }; devclass_t smbus_devclass; MODULE_VERSION(smbus, SMBUS_MODVER); diff --git a/sys/dev/sound/pci/hda/hdaa.c b/sys/dev/sound/pci/hda/hdaa.c index 8eeeadce6666..ae7d77d431b2 100644 --- a/sys/dev/sound/pci/hda/hdaa.c +++ b/sys/dev/sound/pci/hda/hdaa.c @@ -1,7157 +1,7154 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Stephane E. Potvin * Copyright (c) 2006 Ariff Abdullah * Copyright (c) 2008-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Intel High Definition Audio (Audio function) driver for FreeBSD. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include <dev/sound/pcm/sound.h> #include <sys/ctype.h> #include <sys/taskqueue.h> #include <dev/sound/pci/hda/hdac.h> #include <dev/sound/pci/hda/hdaa.h> #include <dev/sound/pci/hda/hda_reg.h> #include "mixer_if.h" SND_DECLARE_FILE("$FreeBSD$"); #define hdaa_lock(devinfo) snd_mtxlock((devinfo)->lock) #define hdaa_unlock(devinfo) snd_mtxunlock((devinfo)->lock) #define hdaa_lockassert(devinfo) snd_mtxassert((devinfo)->lock) static const struct { const char *key; uint32_t value; } hdaa_quirks_tab[] = { { "softpcmvol", HDAA_QUIRK_SOFTPCMVOL }, { "fixedrate", HDAA_QUIRK_FIXEDRATE }, { "forcestereo", HDAA_QUIRK_FORCESTEREO }, { "eapdinv", HDAA_QUIRK_EAPDINV }, { "senseinv", HDAA_QUIRK_SENSEINV }, { "ivref50", HDAA_QUIRK_IVREF50 }, { "ivref80", HDAA_QUIRK_IVREF80 }, { "ivref100", HDAA_QUIRK_IVREF100 }, { "ovref50", HDAA_QUIRK_OVREF50 }, { "ovref80", HDAA_QUIRK_OVREF80 }, { "ovref100", HDAA_QUIRK_OVREF100 }, { "ivref", HDAA_QUIRK_IVREF }, { "ovref", HDAA_QUIRK_OVREF }, { "vref", HDAA_QUIRK_VREF }, }; #define HDA_PARSE_MAXDEPTH 10 MALLOC_DEFINE(M_HDAA, "hdaa", "HDA Audio"); static const char *HDA_COLORS[16] = {"Unknown", "Black", "Grey", "Blue", "Green", "Red", "Orange", "Yellow", "Purple", "Pink", "Res.A", "Res.B", "Res.C", "Res.D", "White", "Other"}; static const char *HDA_DEVS[16] = {"Line-out", "Speaker", "Headphones", "CD", "SPDIF-out", "Digital-out", "Modem-line", "Modem-handset", "Line-in", "AUX", "Mic", "Telephony", "SPDIF-in", "Digital-in", "Res.E", "Other"}; static const char *HDA_CONNS[4] = {"Jack", "None", "Fixed", "Both"}; static const char *HDA_CONNECTORS[16] = { "Unknown", "1/8", "1/4", "ATAPI", "RCA", "Optical", "Digital", "Analog", "DIN", "XLR", "RJ-11", "Combo", "0xc", "0xd", "0xe", "Other" }; static const char *HDA_LOCS[64] = { "0x00", "Rear", "Front", "Left", "Right", "Top", "Bottom", "Rear-panel", "Drive-bay", "0x09", "0x0a", "0x0b", "0x0c", "0x0d", "0x0e", "0x0f", "Internal", "0x11", "0x12", "0x13", "0x14", "0x15", "0x16", "Riser", "0x18", "Onboard", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f", "External", "Ext-Rear", "Ext-Front", "Ext-Left", "Ext-Right", "Ext-Top", "Ext-Bottom", "0x27", "0x28", "0x29", "0x2a", "0x2b", "0x2c", "0x2d", "0x2e", "0x2f", "Other", "0x31", "0x32", "0x33", "0x34", "0x35", "Other-Bott", "Lid-In", "Lid-Out", "0x39", "0x3a", "0x3b", "0x3c", "0x3d", "0x3e", "0x3f" }; static const char *HDA_GPIO_ACTIONS[8] = { "keep", "set", "clear", "disable", "input", "0x05", "0x06", "0x07"}; static const char *HDA_HDMI_CODING_TYPES[18] = { "undefined", "LPCM", "AC-3", "MPEG1", "MP3", "MPEG2", "AAC-LC", "DTS", "ATRAC", "DSD", "E-AC-3", "DTS-HD", "MLP", "DST", "WMAPro", "HE-AAC", "HE-AACv2", "MPEG-Surround" }; /* Default */ static uint32_t hdaa_fmt[] = { SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps hdaa_caps = {48000, 48000, hdaa_fmt, 0}; static const struct { uint32_t rate; int valid; uint16_t base; uint16_t mul; uint16_t div; } hda_rate_tab[] = { { 8000, 1, 0x0000, 0x0000, 0x0500 }, /* (48000 * 1) / 6 */ { 9600, 0, 0x0000, 0x0000, 0x0400 }, /* (48000 * 1) / 5 */ { 12000, 0, 0x0000, 0x0000, 0x0300 }, /* (48000 * 1) / 4 */ { 16000, 1, 0x0000, 0x0000, 0x0200 }, /* (48000 * 1) / 3 */ { 18000, 0, 0x0000, 0x1000, 0x0700 }, /* (48000 * 3) / 8 */ { 19200, 0, 0x0000, 0x0800, 0x0400 }, /* (48000 * 2) / 5 */ { 24000, 0, 0x0000, 0x0000, 0x0100 }, /* (48000 * 1) / 2 */ { 28800, 0, 0x0000, 0x1000, 0x0400 }, /* (48000 * 3) / 5 */ { 32000, 1, 0x0000, 0x0800, 0x0200 }, /* (48000 * 2) / 3 */ { 36000, 0, 0x0000, 0x1000, 0x0300 }, /* (48000 * 3) / 4 */ { 38400, 0, 0x0000, 0x1800, 0x0400 }, /* (48000 * 4) / 5 */ { 48000, 1, 0x0000, 0x0000,
0x0000 }, /* (48000 * 1) / 1 */ { 64000, 0, 0x0000, 0x1800, 0x0200 }, /* (48000 * 4) / 3 */ { 72000, 0, 0x0000, 0x1000, 0x0100 }, /* (48000 * 3) / 2 */ { 96000, 1, 0x0000, 0x0800, 0x0000 }, /* (48000 * 2) / 1 */ { 144000, 0, 0x0000, 0x1000, 0x0000 }, /* (48000 * 3) / 1 */ { 192000, 1, 0x0000, 0x1800, 0x0000 }, /* (48000 * 4) / 1 */ { 8820, 0, 0x4000, 0x0000, 0x0400 }, /* (44100 * 1) / 5 */ { 11025, 1, 0x4000, 0x0000, 0x0300 }, /* (44100 * 1) / 4 */ { 12600, 0, 0x4000, 0x0800, 0x0600 }, /* (44100 * 2) / 7 */ { 14700, 0, 0x4000, 0x0000, 0x0200 }, /* (44100 * 1) / 3 */ { 17640, 0, 0x4000, 0x0800, 0x0400 }, /* (44100 * 2) / 5 */ { 18900, 0, 0x4000, 0x1000, 0x0600 }, /* (44100 * 3) / 7 */ { 22050, 1, 0x4000, 0x0000, 0x0100 }, /* (44100 * 1) / 2 */ { 25200, 0, 0x4000, 0x1800, 0x0600 }, /* (44100 * 4) / 7 */ { 26460, 0, 0x4000, 0x1000, 0x0400 }, /* (44100 * 3) / 5 */ { 29400, 0, 0x4000, 0x0800, 0x0200 }, /* (44100 * 2) / 3 */ { 33075, 0, 0x4000, 0x1000, 0x0300 }, /* (44100 * 3) / 4 */ { 35280, 0, 0x4000, 0x1800, 0x0400 }, /* (44100 * 4) / 5 */ { 44100, 1, 0x4000, 0x0000, 0x0000 }, /* (44100 * 1) / 1 */ { 58800, 0, 0x4000, 0x1800, 0x0200 }, /* (44100 * 4) / 3 */ { 66150, 0, 0x4000, 0x1000, 0x0100 }, /* (44100 * 3) / 2 */ { 88200, 1, 0x4000, 0x0800, 0x0000 }, /* (44100 * 2) / 1 */ { 132300, 0, 0x4000, 0x1000, 0x0000 }, /* (44100 * 3) / 1 */ { 176400, 1, 0x4000, 0x1800, 0x0000 }, /* (44100 * 4) / 1 */ }; #define HDA_RATE_TAB_LEN (sizeof(hda_rate_tab) / sizeof(hda_rate_tab[0])) const static char *ossnames[] = SOUND_DEVICE_NAMES; /**************************************************************************** * Function prototypes ****************************************************************************/ static int hdaa_pcmchannel_setup(struct hdaa_chan *); static void hdaa_widget_connection_select(struct hdaa_widget *, uint8_t); static void hdaa_audio_ctl_amp_set(struct hdaa_audio_ctl *, uint32_t, int, int); static struct hdaa_audio_ctl *hdaa_audio_ctl_amp_get(struct hdaa_devinfo *, nid_t, int, int, int); static void hdaa_audio_ctl_amp_set_internal(struct hdaa_devinfo *, nid_t, int, int, int, int, int, int); static void hdaa_dump_pin_config(struct hdaa_widget *w, uint32_t conf); static char * hdaa_audio_ctl_ossmixer_mask2allname(uint32_t mask, char *buf, size_t len) { int i, first = 1; bzero(buf, len); for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (mask & (1 << i)) { if (first == 0) strlcat(buf, ", ", len); strlcat(buf, ossnames[i], len); first = 0; } } return (buf); } static struct hdaa_audio_ctl * hdaa_audio_ctl_each(struct hdaa_devinfo *devinfo, int *index) { if (devinfo == NULL || index == NULL || devinfo->ctl == NULL || devinfo->ctlcnt < 1 || *index < 0 || *index >= devinfo->ctlcnt) return (NULL); return (&devinfo->ctl[(*index)++]); } static struct hdaa_audio_ctl * hdaa_audio_ctl_amp_get(struct hdaa_devinfo *devinfo, nid_t nid, int dir, int index, int cnt) { struct hdaa_audio_ctl *ctl; int i, found = 0; if (devinfo == NULL || devinfo->ctl == NULL) return (NULL); i = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { if (ctl->enable == 0) continue; if (ctl->widget->nid != nid) continue; if (dir && ctl->ndir != dir) continue; if (index >= 0 && ctl->ndir == HDAA_CTL_IN && ctl->dir == ctl->ndir && ctl->index != index) continue; found++; if (found == cnt || cnt <= 0) return (ctl); } return (NULL); } static const struct matrix { struct pcmchan_matrix m; int analog; } matrixes[] = { { SND_CHN_MATRIX_MAP_1_0, 1 }, { SND_CHN_MATRIX_MAP_2_0, 1 }, { SND_CHN_MATRIX_MAP_2_1, 0 }, { 
SND_CHN_MATRIX_MAP_3_0, 0 }, { SND_CHN_MATRIX_MAP_3_1, 0 }, { SND_CHN_MATRIX_MAP_4_0, 1 }, { SND_CHN_MATRIX_MAP_4_1, 0 }, { SND_CHN_MATRIX_MAP_5_0, 0 }, { SND_CHN_MATRIX_MAP_5_1, 1 }, { SND_CHN_MATRIX_MAP_6_0, 0 }, { SND_CHN_MATRIX_MAP_6_1, 0 }, { SND_CHN_MATRIX_MAP_7_0, 0 }, { SND_CHN_MATRIX_MAP_7_1, 1 }, }; static const char *channel_names[] = SND_CHN_T_NAMES; /* * Connected channels change handler. */ static void hdaa_channels_handler(struct hdaa_audio_as *as) { struct hdaa_pcm_devinfo *pdevinfo = as->pdevinfo; struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_chan *ch = &devinfo->chans[as->chans[0]]; struct hdaa_widget *w; uint8_t *eld; int i, total, sub, assume, channels; uint16_t cpins, upins, tpins; cpins = upins = 0; eld = NULL; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; w = hdaa_widget_get(devinfo, as->pins[i]); if (w == NULL) continue; if (w->wclass.pin.connected == 1) cpins |= (1 << i); else if (w->wclass.pin.connected != 0) upins |= (1 << i); if (w->eld != NULL && w->eld_len >= 8) eld = w->eld; } tpins = cpins | upins; if (as->hpredir >= 0) tpins &= 0x7fff; if (tpins == 0) tpins = as->pinset; total = sub = assume = channels = 0; if (eld) { /* Map CEA speakers to sound(4) channels. */ if (eld[7] & 0x01) /* Front Left/Right */ channels |= SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR; if (eld[7] & 0x02) /* Low Frequency Effect */ channels |= SND_CHN_T_MASK_LF; if (eld[7] & 0x04) /* Front Center */ channels |= SND_CHN_T_MASK_FC; if (eld[7] & 0x08) { /* Rear Left/Right */ /* If we have both RLR and RLRC, report RLR as side. */ if (eld[7] & 0x40) /* Rear Left/Right Center */ channels |= SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR; else channels |= SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR; } if (eld[7] & 0x10) /* Rear center */ channels |= SND_CHN_T_MASK_BC; if (eld[7] & 0x20) /* Front Left/Right Center */ channels |= SND_CHN_T_MASK_FLC | SND_CHN_T_MASK_FRC; if (eld[7] & 0x40) /* Rear Left/Right Center */ channels |= SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR; } else if (as->pinset != 0 && (tpins & 0xffe0) == 0) { /* Map UAA speakers to sound(4) channels. */ if (tpins & 0x0001) channels |= SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR; if (tpins & 0x0002) channels |= SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF; if (tpins & 0x0004) channels |= SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR; if (tpins & 0x0008) channels |= SND_CHN_T_MASK_FLC | SND_CHN_T_MASK_FRC; if (tpins & 0x0010) { /* If there is no back pin, report side as back. */ if ((as->pinset & 0x0004) == 0) channels |= SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR; else channels |= SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR; } } else if (as->mixed) { /* Mixed assoc can be only stereo or theoretically mono. */ if (ch->channels == 1) channels |= SND_CHN_T_MASK_FC; else channels |= SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR; } if (channels) { /* We have some usable channels info. */ HDA_BOOTVERBOSE( device_printf(pdevinfo->dev, "%s channel set is: ", as->dir == HDAA_CTL_OUT ? "Playback" : "Recording"); for (i = 0; i < SND_CHN_T_MAX; i++) if (channels & (1 << i)) printf("%s, ", channel_names[i]); printf("\n"); ); /* Look for maximal fitting matrix. */ for (i = 0; i < sizeof(matrixes) / sizeof(struct matrix); i++) { if (as->pinset != 0 && matrixes[i].analog == 0) continue; if ((matrixes[i].m.mask & ~channels) == 0) { total = matrixes[i].m.channels; sub = matrixes[i].m.ext; } } } if (total == 0) { assume = 1; total = ch->channels; sub = (total == 6 || total == 8) ? 
1 : 0; } HDA_BOOTVERBOSE( device_printf(pdevinfo->dev, "%s channel matrix is: %s%d.%d (%s)\n", as->dir == HDAA_CTL_OUT ? "Playback" : "Recording", assume ? "unknown, assuming " : "", total - sub, sub, cpins != 0 ? "connected" : (upins != 0 ? "unknown" : "disconnected")); ); } /* * Headphones redirection change handler. */ static void hdaa_hpredir_handler(struct hdaa_widget *w) { struct hdaa_devinfo *devinfo = w->devinfo; struct hdaa_audio_as *as = &devinfo->as[w->bindas]; struct hdaa_widget *w1; struct hdaa_audio_ctl *ctl; uint32_t val; int j, connected = w->wclass.pin.connected; HDA_BOOTVERBOSE( device_printf((as->pdevinfo && as->pdevinfo->dev) ? as->pdevinfo->dev : devinfo->dev, "Redirect output to: %s\n", connected ? "headphones": "main"); ); /* (Un)Mute headphone pin. */ ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, -1, 1); if (ctl != NULL && ctl->mute) { /* If pin has muter - use it. */ val = connected ? 0 : 1; if (val != ctl->forcemute) { ctl->forcemute = val; hdaa_audio_ctl_amp_set(ctl, HDAA_AMP_MUTE_DEFAULT, HDAA_AMP_VOL_DEFAULT, HDAA_AMP_VOL_DEFAULT); } } else { /* If there is no muter - disable pin output. */ if (connected) val = w->wclass.pin.ctrl | HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE; else val = w->wclass.pin.ctrl & ~HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE; if (val != w->wclass.pin.ctrl) { w->wclass.pin.ctrl = val; hda_command(devinfo->dev, HDA_CMD_SET_PIN_WIDGET_CTRL(0, w->nid, w->wclass.pin.ctrl)); } } /* (Un)Mute other pins. */ for (j = 0; j < 15; j++) { if (as->pins[j] <= 0) continue; ctl = hdaa_audio_ctl_amp_get(devinfo, as->pins[j], HDAA_CTL_IN, -1, 1); if (ctl != NULL && ctl->mute) { /* If pin has muter - use it. */ val = connected ? 1 : 0; if (val == ctl->forcemute) continue; ctl->forcemute = val; hdaa_audio_ctl_amp_set(ctl, HDAA_AMP_MUTE_DEFAULT, HDAA_AMP_VOL_DEFAULT, HDAA_AMP_VOL_DEFAULT); continue; } /* If there is no muter - disable pin output. */ w1 = hdaa_widget_get(devinfo, as->pins[j]); if (w1 != NULL) { if (connected) val = w1->wclass.pin.ctrl & ~HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE; else val = w1->wclass.pin.ctrl | HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE; if (val != w1->wclass.pin.ctrl) { w1->wclass.pin.ctrl = val; hda_command(devinfo->dev, HDA_CMD_SET_PIN_WIDGET_CTRL(0, w1->nid, w1->wclass.pin.ctrl)); } } } } /* * Recording source change handler. */ static void hdaa_autorecsrc_handler(struct hdaa_audio_as *as, struct hdaa_widget *w) { struct hdaa_pcm_devinfo *pdevinfo = as->pdevinfo; struct hdaa_devinfo *devinfo; struct hdaa_widget *w1; int i, mask, fullmask, prio, bestprio; char buf[128]; if (!as->mixed || pdevinfo == NULL || pdevinfo->mixer == NULL) return; /* Don't touch anything if we asked not to. */ if (pdevinfo->autorecsrc == 0 || (pdevinfo->autorecsrc == 1 && w != NULL)) return; /* Don't touch anything if "mix" or "speaker" selected. */ if (pdevinfo->recsrc & (SOUND_MASK_IMIX | SOUND_MASK_SPEAKER)) return; /* Don't touch anything if several selected. */ if (ffs(pdevinfo->recsrc) != fls(pdevinfo->recsrc)) return; devinfo = pdevinfo->devinfo; mask = fullmask = 0; bestprio = 0; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; w1 = hdaa_widget_get(devinfo, as->pins[i]); if (w1 == NULL || w1->enable == 0) continue; if (w1->wclass.pin.connected == 0) continue; prio = (w1->wclass.pin.connected == 1) ? 2 : 1; if (prio < bestprio) continue; if (prio > bestprio) { mask = 0; bestprio = prio; } mask |= (1 << w1->ossdev); fullmask |= (1 << w1->ossdev); } if (mask == 0) return; /* Prefer newly connected input. 
*/ if (w != NULL && (mask & (1 << w->ossdev))) mask = (1 << w->ossdev); /* Prefer previously selected input */ if (mask & pdevinfo->recsrc) mask &= pdevinfo->recsrc; /* Prefer mic. */ if (mask & SOUND_MASK_MIC) mask = SOUND_MASK_MIC; /* Prefer monitor (2nd mic). */ if (mask & SOUND_MASK_MONITOR) mask = SOUND_MASK_MONITOR; /* Just take first one. */ mask = (1 << (ffs(mask) - 1)); HDA_BOOTVERBOSE( hdaa_audio_ctl_ossmixer_mask2allname(mask, buf, sizeof(buf)); device_printf(pdevinfo->dev, "Automatically set rec source to: %s\n", buf); ); hdaa_unlock(devinfo); mix_setrecsrc(pdevinfo->mixer, mask); hdaa_lock(devinfo); } /* * Jack presence detection event handler. */ static void hdaa_presence_handler(struct hdaa_widget *w) { struct hdaa_devinfo *devinfo = w->devinfo; struct hdaa_audio_as *as; uint32_t res; int connected, old; if (w->enable == 0 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) return; if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(w->wclass.pin.cap) == 0 || (HDA_CONFIG_DEFAULTCONF_MISC(w->wclass.pin.config) & 1) != 0) return; res = hda_command(devinfo->dev, HDA_CMD_GET_PIN_SENSE(0, w->nid)); connected = (res & HDA_CMD_GET_PIN_SENSE_PRESENCE_DETECT) != 0; if (devinfo->quirks & HDAA_QUIRK_SENSEINV) connected = !connected; old = w->wclass.pin.connected; if (connected == old) return; w->wclass.pin.connected = connected; HDA_BOOTVERBOSE( if (connected || old != 2) { device_printf(devinfo->dev, "Pin sense: nid=%d sense=0x%08x (%sconnected)\n", w->nid, res, !connected ? "dis" : ""); } ); as = &devinfo->as[w->bindas]; if (as->hpredir >= 0 && as->pins[15] == w->nid) hdaa_hpredir_handler(w); if (as->dir == HDAA_CTL_IN && old != 2) hdaa_autorecsrc_handler(as, w); if (old != 2) hdaa_channels_handler(as); } /* * Callback for poll based presence detection. 
*/ static void hdaa_jack_poll_callback(void *arg) { struct hdaa_devinfo *devinfo = arg; struct hdaa_widget *w; int i; hdaa_lock(devinfo); if (devinfo->poll_ival == 0) { hdaa_unlock(devinfo); return; } for (i = 0; i < devinfo->ascnt; i++) { if (devinfo->as[i].hpredir < 0) continue; w = hdaa_widget_get(devinfo, devinfo->as[i].pins[15]); if (w == NULL || w->enable == 0 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; hdaa_presence_handler(w); } callout_reset(&devinfo->poll_jack, devinfo->poll_ival, hdaa_jack_poll_callback, devinfo); hdaa_unlock(devinfo); } static void hdaa_eld_dump(struct hdaa_widget *w) { struct hdaa_devinfo *devinfo = w->devinfo; device_t dev = devinfo->dev; uint8_t *sad; int len, mnl, i, sadc, fmt; if (w->eld == NULL || w->eld_len < 4) return; device_printf(dev, "ELD nid=%d: ELD_Ver=%u Baseline_ELD_Len=%u\n", w->nid, w->eld[0] >> 3, w->eld[2]); if ((w->eld[0] >> 3) != 0x02) return; len = min(w->eld_len, (u_int)w->eld[2] * 4); mnl = w->eld[4] & 0x1f; device_printf(dev, "ELD nid=%d: CEA_EDID_Ver=%u MNL=%u\n", w->nid, w->eld[4] >> 5, mnl); sadc = w->eld[5] >> 4; device_printf(dev, "ELD nid=%d: SAD_Count=%u Conn_Type=%u S_AI=%u HDCP=%u\n", w->nid, sadc, (w->eld[5] >> 2) & 0x3, (w->eld[5] >> 1) & 0x1, w->eld[5] & 0x1); device_printf(dev, "ELD nid=%d: Aud_Synch_Delay=%ums\n", w->nid, w->eld[6] * 2); device_printf(dev, "ELD nid=%d: Channels=0x%b\n", w->nid, w->eld[7], "\020\07RLRC\06FLRC\05RC\04RLR\03FC\02LFE\01FLR"); device_printf(dev, "ELD nid=%d: Port_ID=0x%02x%02x%02x%02x%02x%02x%02x%02x\n", w->nid, w->eld[8], w->eld[9], w->eld[10], w->eld[11], w->eld[12], w->eld[13], w->eld[14], w->eld[15]); device_printf(dev, "ELD nid=%d: Manufacturer_Name=0x%02x%02x\n", w->nid, w->eld[16], w->eld[17]); device_printf(dev, "ELD nid=%d: Product_Code=0x%02x%02x\n", w->nid, w->eld[18], w->eld[19]); device_printf(dev, "ELD nid=%d: Monitor_Name_String='%.*s'\n", w->nid, mnl, &w->eld[20]); for (i = 0; i < sadc; i++) { sad = &w->eld[20 + mnl + i * 3]; fmt = (sad[0] >> 3) & 0x0f; if (fmt == HDA_HDMI_CODING_TYPE_REF_CTX) { fmt = (sad[2] >> 3) & 0x1f; if (fmt < 1 || fmt > 3) fmt = 0; else fmt += 14; } device_printf(dev, "ELD nid=%d: %s %dch freqs=0x%b", w->nid, HDA_HDMI_CODING_TYPES[fmt], (sad[0] & 0x07) + 1, sad[1], "\020\007192\006176\00596\00488\00348\00244\00132"); switch (fmt) { case HDA_HDMI_CODING_TYPE_LPCM: printf(" sizes=0x%b", sad[2] & 0x07, "\020\00324\00220\00116"); break; case HDA_HDMI_CODING_TYPE_AC3: case HDA_HDMI_CODING_TYPE_MPEG1: case HDA_HDMI_CODING_TYPE_MP3: case HDA_HDMI_CODING_TYPE_MPEG2: case HDA_HDMI_CODING_TYPE_AACLC: case HDA_HDMI_CODING_TYPE_DTS: case HDA_HDMI_CODING_TYPE_ATRAC: printf(" max_bitrate=%d", sad[2] * 8000); break; case HDA_HDMI_CODING_TYPE_WMAPRO: printf(" profile=%d", sad[2] & 0x07); break; } printf("\n"); } } static void hdaa_eld_handler(struct hdaa_widget *w) { struct hdaa_devinfo *devinfo = w->devinfo; uint32_t res; int i; if (w->enable == 0 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) return; if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(w->wclass.pin.cap) == 0 || (HDA_CONFIG_DEFAULTCONF_MISC(w->wclass.pin.config) & 1) != 0) return; res = hda_command(devinfo->dev, HDA_CMD_GET_PIN_SENSE(0, w->nid)); if ((w->eld != 0) == ((res & HDA_CMD_GET_PIN_SENSE_ELD_VALID) != 0)) return; if (w->eld != NULL) { w->eld_len = 0; free(w->eld, M_HDAA); w->eld = NULL; } HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Pin sense: nid=%d sense=0x%08x " "(%sconnected, ELD %svalid)\n", w->nid, res, (res & HDA_CMD_GET_PIN_SENSE_PRESENCE_DETECT) ? 
"" : "dis", (res & HDA_CMD_GET_PIN_SENSE_ELD_VALID) ? "" : "in"); ); if ((res & HDA_CMD_GET_PIN_SENSE_ELD_VALID) == 0) return; res = hda_command(devinfo->dev, HDA_CMD_GET_HDMI_DIP_SIZE(0, w->nid, 0x08)); if (res == HDA_INVALID) return; w->eld_len = res & 0xff; if (w->eld_len != 0) w->eld = malloc(w->eld_len, M_HDAA, M_ZERO | M_NOWAIT); if (w->eld == NULL) { w->eld_len = 0; return; } for (i = 0; i < w->eld_len; i++) { res = hda_command(devinfo->dev, HDA_CMD_GET_HDMI_ELDD(0, w->nid, i)); if (res & 0x80000000) w->eld[i] = res & 0xff; } HDA_BOOTVERBOSE( hdaa_eld_dump(w); ); hdaa_channels_handler(&devinfo->as[w->bindas]); } /* * Pin sense initializer. */ static void hdaa_sense_init(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as; struct hdaa_widget *w; int i, poll = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (HDA_PARAM_AUDIO_WIDGET_CAP_UNSOL_CAP(w->param.widget_cap)) { if (w->unsol < 0) w->unsol = HDAC_UNSOL_ALLOC( device_get_parent(devinfo->dev), devinfo->dev, w->nid); hda_command(devinfo->dev, HDA_CMD_SET_UNSOLICITED_RESPONSE(0, w->nid, HDA_CMD_SET_UNSOLICITED_RESPONSE_ENABLE | w->unsol)); } as = &devinfo->as[w->bindas]; if (as->hpredir >= 0 && as->pins[15] == w->nid) { if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(w->wclass.pin.cap) == 0 || (HDA_CONFIG_DEFAULTCONF_MISC(w->wclass.pin.config) & 1) != 0) { device_printf(devinfo->dev, "No presence detection support at nid %d\n", w->nid); } else { if (w->unsol < 0) poll = 1; HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Headphones redirection for " "association %d nid=%d using %s.\n", w->bindas, w->nid, (w->unsol < 0) ? "polling" : "unsolicited responses"); ); } } hdaa_presence_handler(w); if (!HDA_PARAM_PIN_CAP_DP(w->wclass.pin.cap) && !HDA_PARAM_PIN_CAP_HDMI(w->wclass.pin.cap)) continue; hdaa_eld_handler(w); } if (poll) { callout_reset(&devinfo->poll_jack, 1, hdaa_jack_poll_callback, devinfo); } } static void hdaa_sense_deinit(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; int i; callout_stop(&devinfo->poll_jack); for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (w->unsol < 0) continue; hda_command(devinfo->dev, HDA_CMD_SET_UNSOLICITED_RESPONSE(0, w->nid, 0)); HDAC_UNSOL_FREE( device_get_parent(devinfo->dev), devinfo->dev, w->unsol); w->unsol = -1; } } uint32_t hdaa_widget_pin_patch(uint32_t config, const char *str) { char buf[256]; char *key, *value, *rest, *bad; int ival, i; strlcpy(buf, str, sizeof(buf)); rest = buf; while ((key = strsep(&rest, "=")) != NULL) { value = strsep(&rest, " \t"); if (value == NULL) break; ival = strtol(value, &bad, 10); if (strcmp(key, "seq") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_SEQUENCE_MASK; config |= ((ival << HDA_CONFIG_DEFAULTCONF_SEQUENCE_SHIFT) & HDA_CONFIG_DEFAULTCONF_SEQUENCE_MASK); } else if (strcmp(key, "as") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_ASSOCIATION_MASK; config |= ((ival << HDA_CONFIG_DEFAULTCONF_ASSOCIATION_SHIFT) & HDA_CONFIG_DEFAULTCONF_ASSOCIATION_MASK); } else if (strcmp(key, "misc") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_MISC_MASK; config |= ((ival << HDA_CONFIG_DEFAULTCONF_MISC_SHIFT) & HDA_CONFIG_DEFAULTCONF_MISC_MASK); } else if (strcmp(key, "color") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_COLOR_MASK; if (bad[0] == 0) { config |= ((ival << 
HDA_CONFIG_DEFAULTCONF_COLOR_SHIFT) & HDA_CONFIG_DEFAULTCONF_COLOR_MASK); } for (i = 0; i < 16; i++) { if (strcasecmp(HDA_COLORS[i], value) == 0) { config |= (i << HDA_CONFIG_DEFAULTCONF_COLOR_SHIFT); break; } } } else if (strcmp(key, "ctype") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE_MASK; if (bad[0] == 0) { config |= ((ival << HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE_SHIFT) & HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE_MASK); } for (i = 0; i < 16; i++) { if (strcasecmp(HDA_CONNECTORS[i], value) == 0) { config |= (i << HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE_SHIFT); break; } } } else if (strcmp(key, "device") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_DEVICE_MASK; if (bad[0] == 0) { config |= ((ival << HDA_CONFIG_DEFAULTCONF_DEVICE_SHIFT) & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK); continue; } for (i = 0; i < 16; i++) { if (strcasecmp(HDA_DEVS[i], value) == 0) { config |= (i << HDA_CONFIG_DEFAULTCONF_DEVICE_SHIFT); break; } } } else if (strcmp(key, "loc") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_LOCATION_MASK; if (bad[0] == 0) { config |= ((ival << HDA_CONFIG_DEFAULTCONF_LOCATION_SHIFT) & HDA_CONFIG_DEFAULTCONF_LOCATION_MASK); continue; } for (i = 0; i < 64; i++) { if (strcasecmp(HDA_LOCS[i], value) == 0) { config |= (i << HDA_CONFIG_DEFAULTCONF_LOCATION_SHIFT); break; } } } else if (strcmp(key, "conn") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK; if (bad[0] == 0) { config |= ((ival << HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_SHIFT) & HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK); continue; } for (i = 0; i < 4; i++) { if (strcasecmp(HDA_CONNS[i], value) == 0) { config |= (i << HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_SHIFT); break; } } } } return (config); } uint32_t hdaa_gpio_patch(uint32_t gpio, const char *str) { char buf[256]; char *key, *value, *rest; int ikey, i; strlcpy(buf, str, sizeof(buf)); rest = buf; while ((key = strsep(&rest, "=")) != NULL) { value = strsep(&rest, " \t"); if (value == NULL) break; ikey = strtol(key, NULL, 10); if (ikey < 0 || ikey > 7) continue; for (i = 0; i < 7; i++) { if (strcasecmp(HDA_GPIO_ACTIONS[i], value) == 0) { gpio &= ~HDAA_GPIO_MASK(ikey); gpio |= i << HDAA_GPIO_SHIFT(ikey); break; } } } return (gpio); } static void hdaa_local_patch_pin(struct hdaa_widget *w) { device_t dev = w->devinfo->dev; const char *res = NULL; uint32_t config, orig; char buf[32]; config = orig = w->wclass.pin.config; snprintf(buf, sizeof(buf), "cad%u.nid%u.config", hda_get_codec_id(dev), w->nid); if (resource_string_value(device_get_name( device_get_parent(device_get_parent(dev))), device_get_unit(device_get_parent(device_get_parent(dev))), buf, &res) == 0) { if (strncmp(res, "0x", 2) == 0) { config = strtol(res + 2, NULL, 16); } else { config = hdaa_widget_pin_patch(config, res); } } snprintf(buf, sizeof(buf), "nid%u.config", w->nid); if (resource_string_value(device_get_name(dev), device_get_unit(dev), buf, &res) == 0) { if (strncmp(res, "0x", 2) == 0) { config = strtol(res + 2, NULL, 16); } else { config = hdaa_widget_pin_patch(config, res); } } HDA_BOOTVERBOSE( if (config != orig) device_printf(w->devinfo->dev, "Patching pin config nid=%u 0x%08x -> 0x%08x\n", w->nid, orig, config); ); w->wclass.pin.newconf = w->wclass.pin.config = config; } static void hdaa_dump_audio_formats_sb(struct sbuf *sb, uint32_t fcap, uint32_t pcmcap) { uint32_t cap; cap = fcap; if (cap != 0) { sbuf_printf(sb, " Stream cap: 0x%08x", cap); if (HDA_PARAM_SUPP_STREAM_FORMATS_AC3(cap)) sbuf_printf(sb, " AC3"); if (HDA_PARAM_SUPP_STREAM_FORMATS_FLOAT32(cap)) sbuf_printf(sb, " FLOAT32"); if 
(HDA_PARAM_SUPP_STREAM_FORMATS_PCM(cap)) sbuf_printf(sb, " PCM"); sbuf_printf(sb, "\n"); } cap = pcmcap; if (cap != 0) { sbuf_printf(sb, " PCM cap: 0x%08x", cap); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8BIT(cap)) sbuf_printf(sb, " 8"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16BIT(cap)) sbuf_printf(sb, " 16"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_20BIT(cap)) sbuf_printf(sb, " 20"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_24BIT(cap)) sbuf_printf(sb, " 24"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32BIT(cap)) sbuf_printf(sb, " 32"); sbuf_printf(sb, " bits,"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8KHZ(cap)) sbuf_printf(sb, " 8"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_11KHZ(cap)) sbuf_printf(sb, " 11"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16KHZ(cap)) sbuf_printf(sb, " 16"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_22KHZ(cap)) sbuf_printf(sb, " 22"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32KHZ(cap)) sbuf_printf(sb, " 32"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_44KHZ(cap)) sbuf_printf(sb, " 44"); sbuf_printf(sb, " 48"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_88KHZ(cap)) sbuf_printf(sb, " 88"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_96KHZ(cap)) sbuf_printf(sb, " 96"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_176KHZ(cap)) sbuf_printf(sb, " 176"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_192KHZ(cap)) sbuf_printf(sb, " 192"); sbuf_printf(sb, " KHz\n"); } } static void hdaa_dump_pin_sb(struct sbuf *sb, struct hdaa_widget *w) { uint32_t pincap, conf; pincap = w->wclass.pin.cap; sbuf_printf(sb, " Pin cap: 0x%08x", pincap); if (HDA_PARAM_PIN_CAP_IMP_SENSE_CAP(pincap)) sbuf_printf(sb, " ISC"); if (HDA_PARAM_PIN_CAP_TRIGGER_REQD(pincap)) sbuf_printf(sb, " TRQD"); if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(pincap)) sbuf_printf(sb, " PDC"); if (HDA_PARAM_PIN_CAP_HEADPHONE_CAP(pincap)) sbuf_printf(sb, " HP"); if (HDA_PARAM_PIN_CAP_OUTPUT_CAP(pincap)) sbuf_printf(sb, " OUT"); if (HDA_PARAM_PIN_CAP_INPUT_CAP(pincap)) sbuf_printf(sb, " IN"); if (HDA_PARAM_PIN_CAP_BALANCED_IO_PINS(pincap)) sbuf_printf(sb, " BAL"); if (HDA_PARAM_PIN_CAP_HDMI(pincap)) sbuf_printf(sb, " HDMI"); if (HDA_PARAM_PIN_CAP_VREF_CTRL(pincap)) { sbuf_printf(sb, " VREF["); if (HDA_PARAM_PIN_CAP_VREF_CTRL_50(pincap)) sbuf_printf(sb, " 50"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_80(pincap)) sbuf_printf(sb, " 80"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_100(pincap)) sbuf_printf(sb, " 100"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_GROUND(pincap)) sbuf_printf(sb, " GROUND"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_HIZ(pincap)) sbuf_printf(sb, " HIZ"); sbuf_printf(sb, " ]"); } if (HDA_PARAM_PIN_CAP_EAPD_CAP(pincap)) sbuf_printf(sb, " EAPD"); if (HDA_PARAM_PIN_CAP_DP(pincap)) sbuf_printf(sb, " DP"); if (HDA_PARAM_PIN_CAP_HBR(pincap)) sbuf_printf(sb, " HBR"); sbuf_printf(sb, "\n"); conf = w->wclass.pin.config; sbuf_printf(sb, " Pin config: 0x%08x", conf); sbuf_printf(sb, " as=%d seq=%d " "device=%s conn=%s ctype=%s loc=%s color=%s misc=%d\n", HDA_CONFIG_DEFAULTCONF_ASSOCIATION(conf), HDA_CONFIG_DEFAULTCONF_SEQUENCE(conf), HDA_DEVS[HDA_CONFIG_DEFAULTCONF_DEVICE(conf)], HDA_CONNS[HDA_CONFIG_DEFAULTCONF_CONNECTIVITY(conf)], HDA_CONNECTORS[HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE(conf)], HDA_LOCS[HDA_CONFIG_DEFAULTCONF_LOCATION(conf)], HDA_COLORS[HDA_CONFIG_DEFAULTCONF_COLOR(conf)], HDA_CONFIG_DEFAULTCONF_MISC(conf)); sbuf_printf(sb, " Pin control: 0x%08x", w->wclass.pin.ctrl); if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_HPHN_ENABLE) sbuf_printf(sb, " HP"); if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE) sbuf_printf(sb, " IN"); if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE) sbuf_printf(sb, " OUT"); if 
(HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) { if ((w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) == 0x03) sbuf_printf(sb, " HBR"); else if ((w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) != 0) sbuf_printf(sb, " EPTs"); } else { if ((w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) != 0) sbuf_printf(sb, " VREFs"); } sbuf_printf(sb, "\n"); } static void hdaa_dump_amp_sb(struct sbuf *sb, uint32_t cap, const char *banner) { int offset, size, step; offset = HDA_PARAM_OUTPUT_AMP_CAP_OFFSET(cap); size = HDA_PARAM_OUTPUT_AMP_CAP_STEPSIZE(cap); step = HDA_PARAM_OUTPUT_AMP_CAP_NUMSTEPS(cap); sbuf_printf(sb, " %s amp: 0x%08x " "mute=%d step=%d size=%d offset=%d (%+d/%+ddB)\n", banner, cap, HDA_PARAM_OUTPUT_AMP_CAP_MUTE_CAP(cap), step, size, offset, ((0 - offset) * (size + 1)) / 4, ((step - offset) * (size + 1)) / 4); } static int hdaa_sysctl_caps(SYSCTL_HANDLER_ARGS) { struct hdaa_devinfo *devinfo; struct hdaa_widget *w, *cw; struct sbuf sb; char buf[64]; int error, j; w = (struct hdaa_widget *)oidp->oid_arg1; devinfo = w->devinfo; sbuf_new_for_sysctl(&sb, NULL, 256, req); sbuf_printf(&sb, "%s%s\n", w->name, (w->enable == 0) ? " [DISABLED]" : ""); sbuf_printf(&sb, " Widget cap: 0x%08x", w->param.widget_cap); if (w->param.widget_cap & 0x0ee1) { if (HDA_PARAM_AUDIO_WIDGET_CAP_LR_SWAP(w->param.widget_cap)) sbuf_printf(&sb, " LRSWAP"); if (HDA_PARAM_AUDIO_WIDGET_CAP_POWER_CTRL(w->param.widget_cap)) sbuf_printf(&sb, " PWR"); if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) sbuf_printf(&sb, " DIGITAL"); if (HDA_PARAM_AUDIO_WIDGET_CAP_UNSOL_CAP(w->param.widget_cap)) sbuf_printf(&sb, " UNSOL"); if (HDA_PARAM_AUDIO_WIDGET_CAP_PROC_WIDGET(w->param.widget_cap)) sbuf_printf(&sb, " PROC"); if (HDA_PARAM_AUDIO_WIDGET_CAP_STRIPE(w->param.widget_cap)) sbuf_printf(&sb, " STRIPE(x%d)", 1 << (fls(w->wclass.conv.stripecap) - 1)); j = HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap); if (j == 1) sbuf_printf(&sb, " STEREO"); else if (j > 1) sbuf_printf(&sb, " %dCH", j + 1); } sbuf_printf(&sb, "\n"); if (w->bindas != -1) { sbuf_printf(&sb, " Association: %d (0x%04x)\n", w->bindas, w->bindseqmask); } if (w->ossmask != 0 || w->ossdev >= 0) { sbuf_printf(&sb, " OSS: %s", hdaa_audio_ctl_ossmixer_mask2allname(w->ossmask, buf, sizeof(buf))); if (w->ossdev >= 0) sbuf_printf(&sb, " (%s)", ossnames[w->ossdev]); sbuf_printf(&sb, "\n"); } if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) { hdaa_dump_audio_formats_sb(&sb, w->param.supp_stream_formats, w->param.supp_pcm_size_rate); } else if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->waspin) hdaa_dump_pin_sb(&sb, w); if (w->param.eapdbtl != HDA_INVALID) { sbuf_printf(&sb, " EAPD: 0x%08x%s%s%s\n", w->param.eapdbtl, (w->param.eapdbtl & HDA_CMD_SET_EAPD_BTL_ENABLE_LR_SWAP) ? " LRSWAP" : "", (w->param.eapdbtl & HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD) ? " EAPD" : "", (w->param.eapdbtl & HDA_CMD_SET_EAPD_BTL_ENABLE_BTL) ? 
" BTL" : ""); } if (HDA_PARAM_AUDIO_WIDGET_CAP_OUT_AMP(w->param.widget_cap) && w->param.outamp_cap != 0) hdaa_dump_amp_sb(&sb, w->param.outamp_cap, "Output"); if (HDA_PARAM_AUDIO_WIDGET_CAP_IN_AMP(w->param.widget_cap) && w->param.inamp_cap != 0) hdaa_dump_amp_sb(&sb, w->param.inamp_cap, " Input"); if (w->nconns > 0) sbuf_printf(&sb, " Connections: %d\n", w->nconns); for (j = 0; j < w->nconns; j++) { cw = hdaa_widget_get(devinfo, w->conns[j]); sbuf_printf(&sb, " + %s<- nid=%d [%s]", (w->connsenable[j] == 0)?"[DISABLED] ":"", w->conns[j], (cw == NULL) ? "GHOST!" : cw->name); if (cw == NULL) sbuf_printf(&sb, " [UNKNOWN]"); else if (cw->enable == 0) sbuf_printf(&sb, " [DISABLED]"); if (w->nconns > 1 && w->selconn == j && w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) sbuf_printf(&sb, " (selected)"); sbuf_printf(&sb, "\n"); } error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } static int hdaa_sysctl_config(SYSCTL_HANDLER_ARGS) { char buf[256]; int error; uint32_t conf; conf = *(uint32_t *)oidp->oid_arg1; snprintf(buf, sizeof(buf), "0x%08x as=%d seq=%d " "device=%s conn=%s ctype=%s loc=%s color=%s misc=%d", conf, HDA_CONFIG_DEFAULTCONF_ASSOCIATION(conf), HDA_CONFIG_DEFAULTCONF_SEQUENCE(conf), HDA_DEVS[HDA_CONFIG_DEFAULTCONF_DEVICE(conf)], HDA_CONNS[HDA_CONFIG_DEFAULTCONF_CONNECTIVITY(conf)], HDA_CONNECTORS[HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE(conf)], HDA_LOCS[HDA_CONFIG_DEFAULTCONF_LOCATION(conf)], HDA_COLORS[HDA_CONFIG_DEFAULTCONF_COLOR(conf)], HDA_CONFIG_DEFAULTCONF_MISC(conf)); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); if (strncmp(buf, "0x", 2) == 0) conf = strtol(buf + 2, NULL, 16); else conf = hdaa_widget_pin_patch(conf, buf); *(uint32_t *)oidp->oid_arg1 = conf; return (0); } static void hdaa_config_fetch(const char *str, uint32_t *on, uint32_t *off) { int i = 0, j, k, len, inv; for (;;) { while (str[i] != '\0' && (str[i] == ',' || isspace(str[i]) != 0)) i++; if (str[i] == '\0') return; j = i; while (str[j] != '\0' && !(str[j] == ',' || isspace(str[j]) != 0)) j++; len = j - i; if (len > 2 && strncmp(str + i, "no", 2) == 0) inv = 2; else inv = 0; for (k = 0; len > inv && k < nitems(hdaa_quirks_tab); k++) { if (strncmp(str + i + inv, hdaa_quirks_tab[k].key, len - inv) != 0) continue; if (len - inv != strlen(hdaa_quirks_tab[k].key)) continue; if (inv == 0) { *on |= hdaa_quirks_tab[k].value; *off &= ~hdaa_quirks_tab[k].value; } else { *off |= hdaa_quirks_tab[k].value; *on &= ~hdaa_quirks_tab[k].value; } break; } i = j; } } static int hdaa_sysctl_quirks(SYSCTL_HANDLER_ARGS) { char buf[256]; int error, n = 0, i; uint32_t quirks, quirks_off; quirks = *(uint32_t *)oidp->oid_arg1; buf[0] = 0; for (i = 0; i < nitems(hdaa_quirks_tab); i++) { if ((quirks & hdaa_quirks_tab[i].value) != 0) n += snprintf(buf + n, sizeof(buf) - n, "%s%s", n != 0 ? 
"," : "", hdaa_quirks_tab[i].key); } error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); if (strncmp(buf, "0x", 2) == 0) quirks = strtol(buf + 2, NULL, 16); else { quirks = 0; hdaa_config_fetch(buf, &quirks, &quirks_off); } *(uint32_t *)oidp->oid_arg1 = quirks; return (0); } static void hdaa_local_patch(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; const char *res = NULL; uint32_t quirks_on = 0, quirks_off = 0, x; int i; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL) continue; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) hdaa_local_patch_pin(w); } if (resource_string_value(device_get_name(devinfo->dev), device_get_unit(devinfo->dev), "config", &res) == 0) { if (res != NULL && strlen(res) > 0) hdaa_config_fetch(res, &quirks_on, &quirks_off); devinfo->quirks |= quirks_on; devinfo->quirks &= ~quirks_off; } if (devinfo->newquirks == -1) devinfo->newquirks = devinfo->quirks; else devinfo->quirks = devinfo->newquirks; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, "Config options: 0x%08x\n", devinfo->quirks); ); if (resource_string_value(device_get_name(devinfo->dev), device_get_unit(devinfo->dev), "gpio_config", &res) == 0) { if (strncmp(res, "0x", 2) == 0) { devinfo->gpio = strtol(res + 2, NULL, 16); } else { devinfo->gpio = hdaa_gpio_patch(devinfo->gpio, res); } } if (devinfo->newgpio == -1) devinfo->newgpio = devinfo->gpio; else devinfo->gpio = devinfo->newgpio; if (devinfo->newgpo == -1) devinfo->newgpo = devinfo->gpo; else devinfo->gpo = devinfo->newgpo; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, "GPIO config options:"); for (i = 0; i < 7; i++) { x = (devinfo->gpio & HDAA_GPIO_MASK(i)) >> HDAA_GPIO_SHIFT(i); if (x != 0) printf(" %d=%s", i, HDA_GPIO_ACTIONS[x]); } printf("\n"); ); } static void hdaa_widget_connection_parse(struct hdaa_widget *w) { uint32_t res; int i, j, max, ents, entnum; nid_t nid = w->nid; nid_t cnid, addcnid, prevcnid; w->nconns = 0; res = hda_command(w->devinfo->dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_CONN_LIST_LENGTH)); ents = HDA_PARAM_CONN_LIST_LENGTH_LIST_LENGTH(res); if (ents < 1) return; entnum = HDA_PARAM_CONN_LIST_LENGTH_LONG_FORM(res) ? 
2 : 4; max = (sizeof(w->conns) / sizeof(w->conns[0])) - 1; prevcnid = 0; #define CONN_RMASK(e) (1 << ((32 / (e)) - 1)) #define CONN_NMASK(e) (CONN_RMASK(e) - 1) #define CONN_RESVAL(r, e, n) ((r) >> ((32 / (e)) * (n))) #define CONN_RANGE(r, e, n) (CONN_RESVAL(r, e, n) & CONN_RMASK(e)) #define CONN_CNID(r, e, n) (CONN_RESVAL(r, e, n) & CONN_NMASK(e)) for (i = 0; i < ents; i += entnum) { res = hda_command(w->devinfo->dev, HDA_CMD_GET_CONN_LIST_ENTRY(0, nid, i)); for (j = 0; j < entnum; j++) { cnid = CONN_CNID(res, entnum, j); if (cnid == 0) { if (w->nconns < ents) device_printf(w->devinfo->dev, "WARNING: nid=%d has zero cnid " "entnum=%d j=%d index=%d " "entries=%d found=%d res=0x%08x\n", nid, entnum, j, i, ents, w->nconns, res); else goto getconns_out; } if (cnid < w->devinfo->startnode || cnid >= w->devinfo->endnode) { HDA_BOOTVERBOSE( device_printf(w->devinfo->dev, "WARNING: nid=%d has cnid outside " "of the AFG range j=%d " "entnum=%d index=%d res=0x%08x\n", nid, j, entnum, i, res); ); } if (CONN_RANGE(res, entnum, j) == 0) addcnid = cnid; else if (prevcnid == 0 || prevcnid >= cnid) { device_printf(w->devinfo->dev, "WARNING: Invalid child range " "nid=%d index=%d j=%d entnum=%d " "prevcnid=%d cnid=%d res=0x%08x\n", nid, i, j, entnum, prevcnid, cnid, res); addcnid = cnid; } else addcnid = prevcnid + 1; while (addcnid <= cnid) { if (w->nconns > max) { device_printf(w->devinfo->dev, "Adding %d (nid=%d): " "Max connection reached! max=%d\n", addcnid, nid, max + 1); goto getconns_out; } w->connsenable[w->nconns] = 1; w->conns[w->nconns++] = addcnid++; } prevcnid = cnid; } } getconns_out: return; } static void hdaa_widget_parse(struct hdaa_widget *w) { device_t dev = w->devinfo->dev; uint32_t wcap, cap; nid_t nid = w->nid; char buf[64]; w->param.widget_cap = wcap = hda_command(dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_AUDIO_WIDGET_CAP)); w->type = HDA_PARAM_AUDIO_WIDGET_CAP_TYPE(wcap); hdaa_widget_connection_parse(w); if (HDA_PARAM_AUDIO_WIDGET_CAP_OUT_AMP(wcap)) { if (HDA_PARAM_AUDIO_WIDGET_CAP_AMP_OVR(wcap)) w->param.outamp_cap = hda_command(dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_OUTPUT_AMP_CAP)); else w->param.outamp_cap = w->devinfo->outamp_cap; } else w->param.outamp_cap = 0; if (HDA_PARAM_AUDIO_WIDGET_CAP_IN_AMP(wcap)) { if (HDA_PARAM_AUDIO_WIDGET_CAP_AMP_OVR(wcap)) w->param.inamp_cap = hda_command(dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_INPUT_AMP_CAP)); else w->param.inamp_cap = w->devinfo->inamp_cap; } else w->param.inamp_cap = 0; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) { if (HDA_PARAM_AUDIO_WIDGET_CAP_FORMAT_OVR(wcap)) { cap = hda_command(dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_SUPP_STREAM_FORMATS)); w->param.supp_stream_formats = (cap != 0) ? cap : w->devinfo->supp_stream_formats; cap = hda_command(dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_SUPP_PCM_SIZE_RATE)); w->param.supp_pcm_size_rate = (cap != 0) ? 
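/*
 * Even with FORMAT_OVR set a converter may report zeroes here; in
 * that case fall back to the function-group-wide defaults cached in
 * devinfo, as done by these ternaries.
 */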
cap : w->devinfo->supp_pcm_size_rate; } else { w->param.supp_stream_formats = w->devinfo->supp_stream_formats; w->param.supp_pcm_size_rate = w->devinfo->supp_pcm_size_rate; } if (HDA_PARAM_AUDIO_WIDGET_CAP_STRIPE(w->param.widget_cap)) { w->wclass.conv.stripecap = hda_command(dev, HDA_CMD_GET_STRIPE_CONTROL(0, w->nid)) >> 20; } else w->wclass.conv.stripecap = 1; } else { w->param.supp_stream_formats = 0; w->param.supp_pcm_size_rate = 0; } if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) { w->wclass.pin.original = w->wclass.pin.newconf = w->wclass.pin.config = hda_command(dev, HDA_CMD_GET_CONFIGURATION_DEFAULT(0, w->nid)); w->wclass.pin.cap = hda_command(dev, HDA_CMD_GET_PARAMETER(0, w->nid, HDA_PARAM_PIN_CAP)); w->wclass.pin.ctrl = hda_command(dev, HDA_CMD_GET_PIN_WIDGET_CTRL(0, nid)); w->wclass.pin.connected = 2; if (HDA_PARAM_PIN_CAP_EAPD_CAP(w->wclass.pin.cap)) { w->param.eapdbtl = hda_command(dev, HDA_CMD_GET_EAPD_BTL_ENABLE(0, nid)); w->param.eapdbtl &= 0x7; w->param.eapdbtl |= HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD; } else w->param.eapdbtl = HDA_INVALID; } w->unsol = -1; hdaa_unlock(w->devinfo); snprintf(buf, sizeof(buf), "nid%d", w->nid); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, buf, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, w, 0, hdaa_sysctl_caps, "A", "Node capabilities"); if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) { snprintf(buf, sizeof(buf), "nid%d_config", w->nid); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, buf, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &w->wclass.pin.newconf, 0, hdaa_sysctl_config, "A", "Current pin configuration"); snprintf(buf, sizeof(buf), "nid%d_original", w->nid); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, buf, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, &w->wclass.pin.original, 0, hdaa_sysctl_config, "A", "Original pin configuration"); } hdaa_lock(w->devinfo); } static void hdaa_widget_postprocess(struct hdaa_widget *w) { const char *typestr; w->type = HDA_PARAM_AUDIO_WIDGET_CAP_TYPE(w->param.widget_cap); switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT: typestr = "audio output"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT: typestr = "audio input"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER: typestr = "audio mixer"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR: typestr = "audio selector"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX: typestr = "pin"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_POWER_WIDGET: typestr = "power widget"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_VOLUME_WIDGET: typestr = "volume widget"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_BEEP_WIDGET: typestr = "beep widget"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_VENDOR_WIDGET: typestr = "vendor widget"; break; default: typestr = "unknown type"; break; } strlcpy(w->name, typestr, sizeof(w->name)); if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) { uint32_t config; const char *devstr; int conn, color; config = w->wclass.pin.config; devstr = HDA_DEVS[(config & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK) >> HDA_CONFIG_DEFAULTCONF_DEVICE_SHIFT]; conn = (config & HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK) >> HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_SHIFT; color = (config & HDA_CONFIG_DEFAULTCONF_COLOR_MASK) >> HDA_CONFIG_DEFAULTCONF_COLOR_SHIFT; strlcat(w->name, ": ", sizeof(w->name)); strlcat(w->name, devstr, 
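/*
 * This produces names such as "pin: Headphones (Green Jack)"
 * (an illustrative example): device type, then color (when
 * informative) and connectivity from the configuration default
 * register.
 */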
sizeof(w->name)); strlcat(w->name, " (", sizeof(w->name)); if (conn == 0 && color != 0 && color != 15) { strlcat(w->name, HDA_COLORS[color], sizeof(w->name)); strlcat(w->name, " ", sizeof(w->name)); } strlcat(w->name, HDA_CONNS[conn], sizeof(w->name)); strlcat(w->name, ")", sizeof(w->name)); } } struct hdaa_widget * hdaa_widget_get(struct hdaa_devinfo *devinfo, nid_t nid) { if (devinfo == NULL || devinfo->widget == NULL || nid < devinfo->startnode || nid >= devinfo->endnode) return (NULL); return (&devinfo->widget[nid - devinfo->startnode]); } static void hdaa_audio_ctl_amp_set_internal(struct hdaa_devinfo *devinfo, nid_t nid, int index, int lmute, int rmute, int left, int right, int dir) { uint16_t v = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, "Setting amplifier nid=%d index=%d %s mute=%d/%d vol=%d/%d\n", nid,index,dir ? "in" : "out",lmute,rmute,left,right); ); if (left != right || lmute != rmute) { v = (1 << (15 - dir)) | (1 << 13) | (index << 8) | (lmute << 7) | left; hda_command(devinfo->dev, HDA_CMD_SET_AMP_GAIN_MUTE(0, nid, v)); v = (1 << (15 - dir)) | (1 << 12) | (index << 8) | (rmute << 7) | right; } else v = (1 << (15 - dir)) | (3 << 12) | (index << 8) | (lmute << 7) | left; hda_command(devinfo->dev, HDA_CMD_SET_AMP_GAIN_MUTE(0, nid, v)); } static void hdaa_audio_ctl_amp_set(struct hdaa_audio_ctl *ctl, uint32_t mute, int left, int right) { nid_t nid; int lmute, rmute; nid = ctl->widget->nid; /* Save new values if valid. */ if (mute != HDAA_AMP_MUTE_DEFAULT) ctl->muted = mute; if (left != HDAA_AMP_VOL_DEFAULT) ctl->left = left; if (right != HDAA_AMP_VOL_DEFAULT) ctl->right = right; /* Prepare effective values */ if (ctl->forcemute) { lmute = 1; rmute = 1; left = 0; right = 0; } else { lmute = HDAA_AMP_LEFT_MUTED(ctl->muted); rmute = HDAA_AMP_RIGHT_MUTED(ctl->muted); left = ctl->left; right = ctl->right; } /* Apply effective values */ if (ctl->dir & HDAA_CTL_OUT) hdaa_audio_ctl_amp_set_internal(ctl->widget->devinfo, nid, ctl->index, lmute, rmute, left, right, 0); if (ctl->dir & HDAA_CTL_IN) hdaa_audio_ctl_amp_set_internal(ctl->widget->devinfo, nid, ctl->index, lmute, rmute, left, right, 1); } static void hdaa_widget_connection_select(struct hdaa_widget *w, uint8_t index) { if (w == NULL || w->nconns < 1 || index > (w->nconns - 1)) return; HDA_BOOTHVERBOSE( device_printf(w->devinfo->dev, "Setting selector nid=%d index=%d\n", w->nid, index); ); hda_command(w->devinfo->dev, HDA_CMD_SET_CONNECTION_SELECT_CONTROL(0, w->nid, index)); w->selconn = index; } /**************************************************************************** * Device Methods ****************************************************************************/ static void * hdaa_channel_init(kobj_t obj, void *data, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct hdaa_chan *ch = data; struct hdaa_pcm_devinfo *pdevinfo = ch->pdevinfo; struct hdaa_devinfo *devinfo = pdevinfo->devinfo; hdaa_lock(devinfo); if (devinfo->quirks & HDAA_QUIRK_FIXEDRATE) { ch->caps.minspeed = ch->caps.maxspeed = 48000; ch->pcmrates[0] = 48000; ch->pcmrates[1] = 0; } ch->dir = dir; ch->b = b; ch->c = c; ch->blksz = pdevinfo->chan_size / pdevinfo->chan_blkcnt; ch->blkcnt = pdevinfo->chan_blkcnt; hdaa_unlock(devinfo); if (sndbuf_alloc(ch->b, bus_get_dma_tag(devinfo->dev), hda_get_dma_nocache(devinfo->dev) ? 
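/*
 * Controllers that cannot snoop CPU caches need the sound buffer
 * mapped non-cacheable to stay coherent with DMA;
 * hda_get_dma_nocache() reports that property from the controller
 * driver.
 */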
BUS_DMA_NOCACHE : BUS_DMA_COHERENT, pdevinfo->chan_size) != 0) return (NULL); return (ch); } static int hdaa_channel_setformat(kobj_t obj, void *data, uint32_t format) { struct hdaa_chan *ch = data; int i; for (i = 0; ch->caps.fmtlist[i] != 0; i++) { if (format == ch->caps.fmtlist[i]) { ch->fmt = format; return (0); } } return (EINVAL); } static uint32_t hdaa_channel_setspeed(kobj_t obj, void *data, uint32_t speed) { struct hdaa_chan *ch = data; uint32_t spd = 0, threshold; int i; /* First look for equal or multiple frequency. */ for (i = 0; ch->pcmrates[i] != 0; i++) { spd = ch->pcmrates[i]; if (speed != 0 && spd / speed * speed == spd) { ch->spd = spd; return (spd); } } /* If no match, just find nearest. */ for (i = 0; ch->pcmrates[i] != 0; i++) { spd = ch->pcmrates[i]; threshold = spd + ((ch->pcmrates[i + 1] != 0) ? ((ch->pcmrates[i + 1] - spd) >> 1) : 0); if (speed < threshold) break; } ch->spd = spd; return (spd); } static uint16_t hdaa_stream_format(struct hdaa_chan *ch) { int i; uint16_t fmt; fmt = 0; if (ch->fmt & AFMT_S16_LE) fmt |= ch->bit16 << 4; else if (ch->fmt & AFMT_S32_LE) fmt |= ch->bit32 << 4; else fmt |= 1 << 4; for (i = 0; i < HDA_RATE_TAB_LEN; i++) { if (hda_rate_tab[i].valid && ch->spd == hda_rate_tab[i].rate) { fmt |= hda_rate_tab[i].base; fmt |= hda_rate_tab[i].mul; fmt |= hda_rate_tab[i].div; break; } } fmt |= (AFMT_CHANNEL(ch->fmt) - 1); return (fmt); } static int hdaa_allowed_stripes(uint16_t fmt) { static const int bits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 }; int size; size = bits[(fmt >> 4) & 0x03]; size *= (fmt & 0x0f) + 1; size *= ((fmt >> 11) & 0x07) + 1; return (0xffffffffU >> (32 - fls(size / 8))); } static void hdaa_audio_setup(struct hdaa_chan *ch) { struct hdaa_audio_as *as = &ch->devinfo->as[ch->as]; struct hdaa_widget *w, *wp; int i, j, k, chn, cchn, totalchn, totalextchn, c; uint16_t fmt, dfmt; /* Mapping channel pairs to codec pins/converters. */ const static uint16_t convmap[2][5] = /* 1.0 2.0 4.0 5.1 7.1 */ {{ 0x0010, 0x0001, 0x0201, 0x0231, 0x4231 }, /* no dup. */ { 0x0010, 0x0001, 0x2201, 0x2231, 0x4231 }}; /* side dup. */ /* Mapping formats to HDMI channel allocations. */ const static uint8_t hdmica[2][8] = /* 1 2 3 4 5 6 7 8 */ {{ 0x02, 0x00, 0x04, 0x08, 0x0a, 0x0e, 0x12, 0x12 }, /* x.0 */ { 0x01, 0x03, 0x01, 0x03, 0x09, 0x0b, 0x0f, 0x13 }}; /* x.1 */ /* Mapping formats to HDMI channels order. */ const static uint32_t hdmich[2][8] = /* 1 / 5 2 / 6 3 / 7 4 / 8 */ {{ 0xFFFF0F00, 0xFFFFFF10, 0xFFF2FF10, 0xFF32FF10, 0xFF324F10, 0xF5324F10, 0x54326F10, 0x54326F10 }, /* x.0 */ { 0xFFFFF000, 0xFFFF0100, 0xFFFFF210, 0xFFFF2310, 0xFF32F410, 0xFF324510, 0xF6324510, 0x76325410 }}; /* x.1 */ int convmapid = -1; nid_t nid; uint8_t csum; totalchn = AFMT_CHANNEL(ch->fmt); totalextchn = AFMT_EXTCHANNEL(ch->fmt); HDA_BOOTHVERBOSE( device_printf(ch->pdevinfo->dev, "PCMDIR_%s: Stream setup fmt=%08x (%d.%d) speed=%d\n", (ch->dir == PCMDIR_PLAY) ? "PLAY" : "REC", ch->fmt, totalchn - totalextchn, totalextchn, ch->spd); ); fmt = hdaa_stream_format(ch); /* Set channels to I/O converters mapping for known speaker setups. 
*/ if ((as->pinset == 0x0007 || as->pinset == 0x0013) || /* Standard 5.1 */ (as->pinset == 0x0017)) /* Standard 7.1 */ convmapid = (ch->dir == PCMDIR_PLAY); dfmt = HDA_CMD_SET_DIGITAL_CONV_FMT1_DIGEN; if (ch->fmt & AFMT_AC3) dfmt |= HDA_CMD_SET_DIGITAL_CONV_FMT1_NAUDIO; chn = 0; for (i = 0; ch->io[i] != -1; i++) { w = hdaa_widget_get(ch->devinfo, ch->io[i]); if (w == NULL) continue; /* If HP redirection is enabled, but failed to use same DAC, make last DAC to duplicate first one. */ if (as->fakeredir && i == (as->pincnt - 1)) { c = (ch->sid << 4); } else { /* Map channels to I/O converters, if set. */ if (convmapid >= 0) chn = (((convmap[convmapid][totalchn / 2] >> i * 4) & 0xf) - 1) * 2; if (chn < 0 || chn >= totalchn) { c = 0; } else { c = (ch->sid << 4) | chn; } } hda_command(ch->devinfo->dev, HDA_CMD_SET_CONV_FMT(0, ch->io[i], fmt)); if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) { hda_command(ch->devinfo->dev, HDA_CMD_SET_DIGITAL_CONV_FMT1(0, ch->io[i], dfmt)); } hda_command(ch->devinfo->dev, HDA_CMD_SET_CONV_STREAM_CHAN(0, ch->io[i], c)); if (HDA_PARAM_AUDIO_WIDGET_CAP_STRIPE(w->param.widget_cap)) { hda_command(ch->devinfo->dev, HDA_CMD_SET_STRIPE_CONTROL(0, w->nid, ch->stripectl)); } cchn = HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap); if (cchn > 1 && chn < totalchn) { cchn = min(cchn, totalchn - chn - 1); hda_command(ch->devinfo->dev, HDA_CMD_SET_CONV_CHAN_COUNT(0, ch->io[i], cchn)); } HDA_BOOTHVERBOSE( device_printf(ch->pdevinfo->dev, "PCMDIR_%s: Stream setup nid=%d: " "fmt=0x%04x, dfmt=0x%04x, chan=0x%04x, " "chan_count=0x%02x, stripe=%d\n", (ch->dir == PCMDIR_PLAY) ? "PLAY" : "REC", ch->io[i], fmt, dfmt, c, cchn, ch->stripectl); ); for (j = 0; j < 16; j++) { if (as->dacs[ch->asindex][j] != ch->io[i]) continue; nid = as->pins[j]; wp = hdaa_widget_get(ch->devinfo, nid); if (wp == NULL) continue; if (!HDA_PARAM_PIN_CAP_DP(wp->wclass.pin.cap) && !HDA_PARAM_PIN_CAP_HDMI(wp->wclass.pin.cap)) continue; /* Set channel mapping. */ for (k = 0; k < 8; k++) { hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_CHAN_SLOT(0, nid, (((hdmich[totalextchn == 0 ? 0 : 1][totalchn - 1] >> (k * 4)) & 0xf) << 4) | k)); } /* * Enable High Bit Rate (HBR) Encoded Packet Type * (EPT), if supported and needed (8ch data). */ if (HDA_PARAM_PIN_CAP_HDMI(wp->wclass.pin.cap) && HDA_PARAM_PIN_CAP_HBR(wp->wclass.pin.cap)) { wp->wclass.pin.ctrl &= ~HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK; if ((ch->fmt & AFMT_AC3) && (cchn == 7)) wp->wclass.pin.ctrl |= 0x03; hda_command(ch->devinfo->dev, HDA_CMD_SET_PIN_WIDGET_CTRL(0, nid, wp->wclass.pin.ctrl)); } /* Stop audio infoframe transmission. */ hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_INDEX(0, nid, 0x00)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_XMIT(0, nid, 0x00)); /* Clear audio infoframe buffer. */ hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_INDEX(0, nid, 0x00)); for (k = 0; k < 32; k++) hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x00)); /* Write HDMI/DisplayPort audio infoframe. 
*/ hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_INDEX(0, nid, 0x00)); if (w->eld != NULL && w->eld_len >= 6 && ((w->eld[5] >> 2) & 0x3) == 1) { /* DisplayPort */ hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x84)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x1b)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x44)); } else { /* HDMI */ hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x84)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x01)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x0a)); csum = 0; csum -= 0x84 + 0x01 + 0x0a + (totalchn - 1) + hdmica[totalextchn == 0 ? 0 : 1][totalchn - 1]; hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, csum)); } hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, totalchn - 1)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x00)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x00)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, hdmica[totalextchn == 0 ? 0 : 1][totalchn - 1])); /* Start audio infoframe transmission. */ hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_INDEX(0, nid, 0x00)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_XMIT(0, nid, 0xc0)); } chn += cchn + 1; } } /* * Greatest Common Divisor. */ static unsigned gcd(unsigned a, unsigned b) { u_int c; while (b != 0) { c = a; a = b; b = (c % b); } return (a); } /* * Least Common Multiple. */ static unsigned lcm(unsigned a, unsigned b) { return ((a * b) / gcd(a, b)); } static int hdaa_channel_setfragments(kobj_t obj, void *data, uint32_t blksz, uint32_t blkcnt) { struct hdaa_chan *ch = data; blksz -= blksz % lcm(HDA_DMA_ALIGNMENT, sndbuf_getalign(ch->b)); if (blksz > (sndbuf_getmaxsize(ch->b) / HDA_BDL_MIN)) blksz = sndbuf_getmaxsize(ch->b) / HDA_BDL_MIN; if (blksz < HDA_BLK_MIN) blksz = HDA_BLK_MIN; if (blkcnt > HDA_BDL_MAX) blkcnt = HDA_BDL_MAX; if (blkcnt < HDA_BDL_MIN) blkcnt = HDA_BDL_MIN; while ((blksz * blkcnt) > sndbuf_getmaxsize(ch->b)) { if ((blkcnt >> 1) >= HDA_BDL_MIN) blkcnt >>= 1; else if ((blksz >> 1) >= HDA_BLK_MIN) blksz >>= 1; else break; } if ((sndbuf_getblksz(ch->b) != blksz || sndbuf_getblkcnt(ch->b) != blkcnt) && sndbuf_resize(ch->b, blkcnt, blksz) != 0) device_printf(ch->devinfo->dev, "%s: failed blksz=%u blkcnt=%u\n", __func__, blksz, blkcnt); ch->blksz = sndbuf_getblksz(ch->b); ch->blkcnt = sndbuf_getblkcnt(ch->b); return (0); } static uint32_t hdaa_channel_setblocksize(kobj_t obj, void *data, uint32_t blksz) { struct hdaa_chan *ch = data; hdaa_channel_setfragments(obj, data, blksz, ch->pdevinfo->chan_blkcnt); return (ch->blksz); } static void hdaa_channel_stop(struct hdaa_chan *ch) { struct hdaa_devinfo *devinfo = ch->devinfo; struct hdaa_widget *w; int i; if ((ch->flags & HDAA_CHN_RUNNING) == 0) return; ch->flags &= ~HDAA_CHN_RUNNING; HDAC_STREAM_STOP(device_get_parent(devinfo->dev), devinfo->dev, ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid); for (i = 0; ch->io[i] != -1; i++) { w = hdaa_widget_get(ch->devinfo, ch->io[i]); if (w == NULL) continue; if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) { hda_command(devinfo->dev, HDA_CMD_SET_DIGITAL_CONV_FMT1(0, ch->io[i], 0)); } hda_command(devinfo->dev, HDA_CMD_SET_CONV_STREAM_CHAN(0, ch->io[i], 0)); } HDAC_STREAM_FREE(device_get_parent(devinfo->dev), devinfo->dev, ch->dir == PCMDIR_PLAY ? 
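/*
 * Direction argument convention for the HDAC_STREAM_*() methods
 * used throughout this file: 1 selects an output (playback)
 * stream, 0 an input (record) one.
 */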
1 : 0, ch->sid); } static int hdaa_channel_start(struct hdaa_chan *ch) { struct hdaa_devinfo *devinfo = ch->devinfo; uint32_t fmt; fmt = hdaa_stream_format(ch); ch->stripectl = fls(ch->stripecap & hdaa_allowed_stripes(fmt) & hda_get_stripes_mask(devinfo->dev)) - 1; ch->sid = HDAC_STREAM_ALLOC(device_get_parent(devinfo->dev), devinfo->dev, ch->dir == PCMDIR_PLAY ? 1 : 0, fmt, ch->stripectl, &ch->dmapos); if (ch->sid <= 0) return (EBUSY); hdaa_audio_setup(ch); HDAC_STREAM_RESET(device_get_parent(devinfo->dev), devinfo->dev, ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid); HDAC_STREAM_START(device_get_parent(devinfo->dev), devinfo->dev, ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid, sndbuf_getbufaddr(ch->b), ch->blksz, ch->blkcnt); ch->flags |= HDAA_CHN_RUNNING; return (0); } static int hdaa_channel_trigger(kobj_t obj, void *data, int go) { struct hdaa_chan *ch = data; int error = 0; if (!PCMTRIG_COMMON(go)) return (0); hdaa_lock(ch->devinfo); switch (go) { case PCMTRIG_START: error = hdaa_channel_start(ch); break; case PCMTRIG_STOP: case PCMTRIG_ABORT: hdaa_channel_stop(ch); break; default: break; } hdaa_unlock(ch->devinfo); return (error); } static uint32_t hdaa_channel_getptr(kobj_t obj, void *data) { struct hdaa_chan *ch = data; struct hdaa_devinfo *devinfo = ch->devinfo; uint32_t ptr; hdaa_lock(devinfo); if (ch->dmapos != NULL) { ptr = *(ch->dmapos); } else { ptr = HDAC_STREAM_GETPTR( device_get_parent(devinfo->dev), devinfo->dev, ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid); } hdaa_unlock(devinfo); /* * Round to available space and force 128 bytes alignment. */ ptr %= ch->blksz * ch->blkcnt; ptr &= HDA_BLK_ALIGN; return (ptr); } static struct pcmchan_caps * hdaa_channel_getcaps(kobj_t obj, void *data) { return (&((struct hdaa_chan *)data)->caps); } static kobj_method_t hdaa_channel_methods[] = { KOBJMETHOD(channel_init, hdaa_channel_init), KOBJMETHOD(channel_setformat, hdaa_channel_setformat), KOBJMETHOD(channel_setspeed, hdaa_channel_setspeed), KOBJMETHOD(channel_setblocksize, hdaa_channel_setblocksize), KOBJMETHOD(channel_setfragments, hdaa_channel_setfragments), KOBJMETHOD(channel_trigger, hdaa_channel_trigger), KOBJMETHOD(channel_getptr, hdaa_channel_getptr), KOBJMETHOD(channel_getcaps, hdaa_channel_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(hdaa_channel); static int hdaa_audio_ctl_ossmixer_init(struct snd_mixer *m) { struct hdaa_pcm_devinfo *pdevinfo = mix_getdevinfo(m); struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w, *cw; uint32_t mask, recmask; int i, j; hdaa_lock(devinfo); pdevinfo->mixer = m; /* Make sure that in case of soft volume it won't stay muted. */ for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) { pdevinfo->left[i] = 100; pdevinfo->right[i] = 100; } /* Declare volume controls assigned to this association. */ mask = pdevinfo->ossmask; if (pdevinfo->playas >= 0) { /* Declare EAPD as ogain control. */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->param.eapdbtl == HDA_INVALID || w->bindas != pdevinfo->playas) continue; mask |= SOUND_MASK_OGAIN; break; } /* Declare soft PCM volume if needed. */
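/*
 * Soft PCM volume means pcm(4) scales samples in software in the
 * feeder chain instead of programming a codec amplifier.  It is
 * forced when no widget provided a PCM volume control, when a quirk
 * requests it, or when the control's dB range is degenerate
 * (minamp == maxamp), i.e. every mixer step would map to the same
 * hardware gain.
 */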
if ((mask & SOUND_MASK_PCM) == 0 || (devinfo->quirks & HDAA_QUIRK_SOFTPCMVOL) || pdevinfo->minamp[SOUND_MIXER_PCM] == pdevinfo->maxamp[SOUND_MIXER_PCM]) { mask |= SOUND_MASK_PCM; pcm_setflags(pdevinfo->dev, pcm_getflags(pdevinfo->dev) | SD_F_SOFTPCMVOL); HDA_BOOTHVERBOSE( device_printf(pdevinfo->dev, "Forcing Soft PCM volume\n"); ); } /* Declare master volume if needed. */ if ((mask & SOUND_MASK_VOLUME) == 0) { mask |= SOUND_MASK_VOLUME; mix_setparentchild(m, SOUND_MIXER_VOLUME, SOUND_MASK_PCM); mix_setrealdev(m, SOUND_MIXER_VOLUME, SOUND_MIXER_NONE); HDA_BOOTHVERBOSE( device_printf(pdevinfo->dev, "Forcing master volume with PCM\n"); ); } } /* Declare record sources available to this association. */ recmask = 0; if (pdevinfo->recas >= 0) { for (i = 0; i < 16; i++) { if (devinfo->as[pdevinfo->recas].dacs[0][i] < 0) continue; w = hdaa_widget_get(devinfo, devinfo->as[pdevinfo->recas].dacs[0][i]); if (w == NULL || w->enable == 0) continue; for (j = 0; j < w->nconns; j++) { if (w->connsenable[j] == 0) continue; cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || cw->enable == 0) continue; if (cw->bindas != pdevinfo->recas && cw->bindas != -2) continue; recmask |= cw->ossmask; } } } recmask &= (1 << SOUND_MIXER_NRDEVICES) - 1; mask &= (1 << SOUND_MIXER_NRDEVICES) - 1; pdevinfo->ossmask = mask; mix_setrecdevs(m, recmask); mix_setdevs(m, mask); hdaa_unlock(devinfo); return (0); } /* * Update amplification per pdevinfo per ossdev, calculate summary coefficient * and write it to codec, update *left and *right to reflect remaining error. */ static void hdaa_audio_ctl_dev_set(struct hdaa_audio_ctl *ctl, int ossdev, int mute, int *left, int *right) { int i, zleft, zright, sleft, sright, smute, lval, rval; ctl->devleft[ossdev] = *left; ctl->devright[ossdev] = *right; ctl->devmute[ossdev] = mute; smute = sleft = sright = zleft = zright = 0; for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) { sleft += ctl->devleft[i]; sright += ctl->devright[i]; smute |= ctl->devmute[i]; if (i == ossdev) continue; zleft += ctl->devleft[i]; zright += ctl->devright[i]; } lval = QDB2VAL(ctl, sleft); rval = QDB2VAL(ctl, sright); hdaa_audio_ctl_amp_set(ctl, smute, lval, rval); *left -= VAL2QDB(ctl, lval) - VAL2QDB(ctl, QDB2VAL(ctl, zleft)); *right -= VAL2QDB(ctl, rval) - VAL2QDB(ctl, QDB2VAL(ctl, zright)); } /* * Trace signal from source, setting volumes on the way. */ static void hdaa_audio_ctl_source_volume(struct hdaa_pcm_devinfo *pdevinfo, int ossdev, nid_t nid, int index, int mute, int left, int right, int depth) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w, *wc; struct hdaa_audio_ctl *ctl; int i, j, conns = 0; if (depth > HDA_PARSE_MAXDEPTH) return; w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return; /* Count number of active inputs. */ if (depth > 0) { for (j = 0; j < w->nconns; j++) { if (!w->connsenable[j]) continue; conns++; } } /* If this is not the first step - use input mixer. Pins have common input ctl so care must be taken. */ if (depth > 0 && (conns == 1 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)) { ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, index, 1); if (ctl) hdaa_audio_ctl_dev_set(ctl, ossdev, mute, &left, &right); } /* If the widget has its own ossdev - do not traverse it. It will be traversed on its own. */
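/*
 * Illustration of the error carrying in hdaa_audio_ctl_dev_set()
 * above (an example, not taken from the original comments): if an
 * OSS device requests -7.5 dB from a shared amplifier that only
 * offers 1 dB steps, the amplifier is programmed to the nearest
 * reachable step and the leftover fraction of a dB remains in
 * *left/*right, so the next amplifier on the path can compensate.
 */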
if (w->ossdev >= 0 && depth > 0) return; /* We must not traverse pins */ if ((w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) && depth > 0) return; /* * If signals are mixed, we can't assign controls any farther. * Ignore this on depth zero. The caller must know why. */ if (conns > 1 && (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER || w->selconn != index)) return; ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_OUT, -1, 1); if (ctl) hdaa_audio_ctl_dev_set(ctl, ossdev, mute, &left, &right); for (i = devinfo->startnode; i < devinfo->endnode; i++) { wc = hdaa_widget_get(devinfo, i); if (wc == NULL || wc->enable == 0) continue; for (j = 0; j < wc->nconns; j++) { if (wc->connsenable[j] && wc->conns[j] == nid) { hdaa_audio_ctl_source_volume(pdevinfo, ossdev, wc->nid, j, mute, left, right, depth + 1); } } } return; } /* * Trace signal from destination, setting volumes on the way. */ static void hdaa_audio_ctl_dest_volume(struct hdaa_pcm_devinfo *pdevinfo, int ossdev, nid_t nid, int index, int mute, int left, int right, int depth) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w, *wc; struct hdaa_audio_ctl *ctl; int i, j, consumers, cleft, cright; if (depth > HDA_PARSE_MAXDEPTH) return; w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return; if (depth > 0) { /* If this node produces output for several consumers, we can't touch it. */ consumers = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { wc = hdaa_widget_get(devinfo, i); if (wc == NULL || wc->enable == 0) continue; for (j = 0; j < wc->nconns; j++) { if (wc->connsenable[j] && wc->conns[j] == nid) consumers++; } } /* The only exception is if real HP redirection is configured and this is a duplication point. XXX: Actually the exception is not completely correct. XXX: The duplication point check is not perfect. */ if ((consumers == 2 && (w->bindas < 0 || as[w->bindas].hpredir < 0 || as[w->bindas].fakeredir || (w->bindseqmask & (1 << 15)) == 0)) || consumers > 2) return; /* Else use its output mixer. */ ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_OUT, -1, 1); if (ctl) hdaa_audio_ctl_dev_set(ctl, ossdev, mute, &left, &right); } /* We must not traverse pins */ if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && depth > 0) return; for (i = 0; i < w->nconns; i++) { if (w->connsenable[i] == 0) continue; if (index >= 0 && i != index) continue; cleft = left; cright = right; ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, i, 1); if (ctl) hdaa_audio_ctl_dev_set(ctl, ossdev, mute, &cleft, &cright); hdaa_audio_ctl_dest_volume(pdevinfo, ossdev, w->conns[i], -1, mute, cleft, cright, depth + 1); } } /* * Set volumes for the specified pdevinfo and ossdev.
*/ static void hdaa_audio_ctl_dev_volume(struct hdaa_pcm_devinfo *pdevinfo, unsigned dev) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w, *cw; uint32_t mute; int lvol, rvol; int i, j; mute = 0; if (pdevinfo->left[dev] == 0) { mute |= HDAA_AMP_MUTE_LEFT; lvol = -4000; } else lvol = ((pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]) * pdevinfo->left[dev] + 50) / 100 + pdevinfo->minamp[dev]; if (pdevinfo->right[dev] == 0) { mute |= HDAA_AMP_MUTE_RIGHT; rvol = -4000; } else rvol = ((pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]) * pdevinfo->right[dev] + 50) / 100 + pdevinfo->minamp[dev]; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->bindas < 0) { if (pdevinfo->index != 0) continue; } else { if (w->bindas != pdevinfo->playas && w->bindas != pdevinfo->recas) continue; } if (dev == SOUND_MIXER_RECLEV && w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) { hdaa_audio_ctl_dest_volume(pdevinfo, dev, w->nid, -1, mute, lvol, rvol, 0); continue; } if (dev == SOUND_MIXER_VOLUME && w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && devinfo->as[w->bindas].dir == HDAA_CTL_OUT) { hdaa_audio_ctl_dest_volume(pdevinfo, dev, w->nid, -1, mute, lvol, rvol, 0); continue; } if (dev == SOUND_MIXER_IGAIN && w->pflags & HDAA_ADC_MONITOR) { for (j = 0; j < w->nconns; j++) { if (!w->connsenable[j]) continue; cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || cw->enable == 0) continue; if (cw->bindas == -1) continue; if (cw->bindas >= 0 && devinfo->as[cw->bindas].dir != HDAA_CTL_IN) continue; hdaa_audio_ctl_dest_volume(pdevinfo, dev, w->nid, j, mute, lvol, rvol, 0); } continue; } if (w->ossdev != dev) continue; hdaa_audio_ctl_source_volume(pdevinfo, dev, w->nid, -1, mute, lvol, rvol, 0); if (dev == SOUND_MIXER_IMIX && (w->pflags & HDAA_IMIX_AS_DST)) hdaa_audio_ctl_dest_volume(pdevinfo, dev, w->nid, -1, mute, lvol, rvol, 0); } } /* * OSS Mixer set method. */ static int hdaa_audio_ctl_ossmixer_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct hdaa_pcm_devinfo *pdevinfo = mix_getdevinfo(m); struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w; int i; hdaa_lock(devinfo); /* Save new values. */ pdevinfo->left[dev] = left; pdevinfo->right[dev] = right; /* 'ogain' is the special case implemented with EAPD. */ if (dev == SOUND_MIXER_OGAIN) { uint32_t orig; w = NULL; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->param.eapdbtl == HDA_INVALID) continue; break; } if (i >= devinfo->endnode) { hdaa_unlock(devinfo); return (-1); } orig = w->param.eapdbtl; if (left == 0) w->param.eapdbtl &= ~HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD; else w->param.eapdbtl |= HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD; if (orig != w->param.eapdbtl) { uint32_t val; val = w->param.eapdbtl; if (devinfo->quirks & HDAA_QUIRK_EAPDINV) val ^= HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD; hda_command(devinfo->dev, HDA_CMD_SET_EAPD_BTL_ENABLE(0, w->nid, val)); } hdaa_unlock(devinfo); return (left | (left << 8)); } /* Recalculate all controls related to this OSS device. */ hdaa_audio_ctl_dev_volume(pdevinfo, dev); hdaa_unlock(devinfo); return (left | (right << 8)); } /* * Set mixer settings to our own default values: * +20dB for mics, -10dB for analog vol, mute for igain, 0dB for others. 
*/ static void hdaa_audio_ctl_set_defaults(struct hdaa_pcm_devinfo *pdevinfo) { int amp, vol, dev; for (dev = 0; dev < SOUND_MIXER_NRDEVICES; dev++) { if ((pdevinfo->ossmask & (1 << dev)) == 0) continue; /* If the value was overridden, leave it as is. */ if (resource_int_value(device_get_name(pdevinfo->dev), device_get_unit(pdevinfo->dev), ossnames[dev], &vol) == 0) continue; vol = -1; if (dev == SOUND_MIXER_OGAIN) vol = 100; else if (dev == SOUND_MIXER_IGAIN) vol = 0; else if (dev == SOUND_MIXER_MIC || dev == SOUND_MIXER_MONITOR) amp = 20 * 4; /* +20dB */ else if (dev == SOUND_MIXER_VOLUME && !pdevinfo->digital) amp = -10 * 4; /* -10dB */ else amp = 0; if (vol < 0 && (pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]) <= 0) { vol = 100; } else if (vol < 0) { vol = ((amp - pdevinfo->minamp[dev]) * 100 + (pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]) / 2) / (pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]); vol = imin(imax(vol, 1), 100); } mix_set(pdevinfo->mixer, dev, vol, vol); } } /* * Recursively commutate the specified record source. */ static uint32_t hdaa_audio_ctl_recsel_comm(struct hdaa_pcm_devinfo *pdevinfo, uint32_t src, nid_t nid, int depth) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w, *cw; struct hdaa_audio_ctl *ctl; char buf[64]; int i, muted; uint32_t res = 0; if (depth > HDA_PARSE_MAXDEPTH) return (0); w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return (0); for (i = 0; i < w->nconns; i++) { if (w->connsenable[i] == 0) continue; cw = hdaa_widget_get(devinfo, w->conns[i]); if (cw == NULL || cw->enable == 0 || cw->bindas == -1) continue; /* Call recursively to trace the signal to its source if needed. */ if ((src & cw->ossmask) != 0) { if (cw->ossdev < 0) { res |= hdaa_audio_ctl_recsel_comm(pdevinfo, src, w->conns[i], depth + 1); } else { res |= cw->ossmask; } } /* We have two special cases: mixers and others (selectors). */ if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) { ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, i, 1); if (ctl == NULL) continue; /* If we have input controls on this node, mute them * according to the requested sources. */ muted = (src & cw->ossmask) ? 0 : 1; if (muted != ctl->forcemute) { ctl->forcemute = muted; hdaa_audio_ctl_amp_set(ctl, HDAA_AMP_MUTE_DEFAULT, HDAA_AMP_VOL_DEFAULT, HDAA_AMP_VOL_DEFAULT); } HDA_BOOTHVERBOSE( device_printf(pdevinfo->dev, "Recsel (%s): nid %d source %d %s\n", hdaa_audio_ctl_ossmixer_mask2allname( src, buf, sizeof(buf)), nid, i, muted?"mute":"unmute"); ); } else { if (w->nconns == 1) break; if ((src & cw->ossmask) == 0) continue; /* If we found the requested source - select it and exit. */ hdaa_widget_connection_select(w, i); HDA_BOOTHVERBOSE( device_printf(pdevinfo->dev, "Recsel (%s): nid %d source %d select\n", hdaa_audio_ctl_ossmixer_mask2allname( src, buf, sizeof(buf)), nid, i); ); break; } } return (res); } static uint32_t hdaa_audio_ctl_ossmixer_setrecsrc(struct snd_mixer *m, uint32_t src) { struct hdaa_pcm_devinfo *pdevinfo = mix_getdevinfo(m); struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w; struct hdaa_audio_as *as; struct hdaa_audio_ctl *ctl; struct hdaa_chan *ch; int i, j; uint32_t ret = 0xffffffff; hdaa_lock(devinfo); if (pdevinfo->recas < 0) { hdaa_unlock(devinfo); return (0); } as = &devinfo->as[pdevinfo->recas]; /* For non-mixed associations we always record everything. */ if (!as->mixed) { hdaa_unlock(devinfo); return (mix_getrecdevs(m)); } /* Commutate the requested recsrc for each ADC.
*/ for (j = 0; j < as->num_chans; j++) { ch = &devinfo->chans[as->chans[j]]; for (i = 0; ch->io[i] >= 0; i++) { w = hdaa_widget_get(devinfo, ch->io[i]); if (w == NULL || w->enable == 0) continue; ret &= hdaa_audio_ctl_recsel_comm(pdevinfo, src, ch->io[i], 0); } } if (ret == 0xffffffff) ret = 0; /* * Some controls could be shared. Reset volumes for controls * related to previously chosen devices, as they may no longer * affect the signal. */ i = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { if (ctl->enable == 0 || !(ctl->ossmask & pdevinfo->recsrc)) continue; if (!((pdevinfo->playas >= 0 && ctl->widget->bindas == pdevinfo->playas) || (pdevinfo->recas >= 0 && ctl->widget->bindas == pdevinfo->recas) || (pdevinfo->index == 0 && ctl->widget->bindas == -2))) continue; for (j = 0; j < SOUND_MIXER_NRDEVICES; j++) { if (pdevinfo->recsrc & (1 << j)) { ctl->devleft[j] = 0; ctl->devright[j] = 0; ctl->devmute[j] = 0; } } } /* * Some controls could be shared. Set volumes for controls * related to devices selected both previously and now. */ for (j = 0; j < SOUND_MIXER_NRDEVICES; j++) { if ((ret | pdevinfo->recsrc) & (1 << j)) hdaa_audio_ctl_dev_volume(pdevinfo, j); } pdevinfo->recsrc = ret; hdaa_unlock(devinfo); return (ret); } static kobj_method_t hdaa_audio_ctl_ossmixer_methods[] = { KOBJMETHOD(mixer_init, hdaa_audio_ctl_ossmixer_init), KOBJMETHOD(mixer_set, hdaa_audio_ctl_ossmixer_set), KOBJMETHOD(mixer_setrecsrc, hdaa_audio_ctl_ossmixer_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(hdaa_audio_ctl_ossmixer); static void hdaa_dump_gpi(struct hdaa_devinfo *devinfo) { device_t dev = devinfo->dev; int i; uint32_t data, wake, unsol, sticky; if (HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap) > 0) { data = hda_command(dev, HDA_CMD_GET_GPI_DATA(0, devinfo->nid)); wake = hda_command(dev, HDA_CMD_GET_GPI_WAKE_ENABLE_MASK(0, devinfo->nid)); unsol = hda_command(dev, HDA_CMD_GET_GPI_UNSOLICITED_ENABLE_MASK(0, devinfo->nid)); sticky = hda_command(dev, HDA_CMD_GET_GPI_STICKY_MASK(0, devinfo->nid)); for (i = 0; i < HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap); i++) { device_printf(dev, " GPI%d:%s%s%s state=%d", i, (sticky & (1 << i)) ? " sticky" : "", (unsol & (1 << i)) ? " unsol" : "", (wake & (1 << i)) ? " wake" : "", (data >> i) & 1); } } } static void hdaa_dump_gpio(struct hdaa_devinfo *devinfo) { device_t dev = devinfo->dev; int i; uint32_t data, dir, enable, wake, unsol, sticky; if (HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap) > 0) { data = hda_command(dev, HDA_CMD_GET_GPIO_DATA(0, devinfo->nid)); enable = hda_command(dev, HDA_CMD_GET_GPIO_ENABLE_MASK(0, devinfo->nid)); dir = hda_command(dev, HDA_CMD_GET_GPIO_DIRECTION(0, devinfo->nid)); wake = hda_command(dev, HDA_CMD_GET_GPIO_WAKE_ENABLE_MASK(0, devinfo->nid)); unsol = hda_command(dev, HDA_CMD_GET_GPIO_UNSOLICITED_ENABLE_MASK(0, devinfo->nid)); sticky = hda_command(dev, HDA_CMD_GET_GPIO_STICKY_MASK(0, devinfo->nid)); for (i = 0; i < HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap); i++) { device_printf(dev, " GPIO%d: ", i); if ((enable & (1 << i)) == 0) { printf("disabled\n"); continue; } if ((dir & (1 << i)) == 0) { printf("input%s%s%s", (sticky & (1 << i)) ? " sticky" : "", (unsol & (1 << i)) ? " unsol" : "", (wake & (1 << i)) ? 
" wake" : ""); } else printf("output"); printf(" state=%d\n", (data >> i) & 1); } } } static void hdaa_dump_gpo(struct hdaa_devinfo *devinfo) { device_t dev = devinfo->dev; int i; uint32_t data; if (HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap) > 0) { data = hda_command(dev, HDA_CMD_GET_GPO_DATA(0, devinfo->nid)); for (i = 0; i < HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap); i++) { device_printf(dev, " GPO%d: state=%d", i, (data >> i) & 1); } } } static void hdaa_audio_parse(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; uint32_t res; int i; nid_t nid; nid = devinfo->nid; res = hda_command(devinfo->dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_GPIO_COUNT)); devinfo->gpio_cap = res; HDA_BOOTVERBOSE( device_printf(devinfo->dev, "NumGPIO=%d NumGPO=%d " "NumGPI=%d GPIWake=%d GPIUnsol=%d\n", HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_GPI_WAKE(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_GPI_UNSOL(devinfo->gpio_cap)); hdaa_dump_gpi(devinfo); hdaa_dump_gpio(devinfo); hdaa_dump_gpo(devinfo); ); res = hda_command(devinfo->dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_SUPP_STREAM_FORMATS)); devinfo->supp_stream_formats = res; res = hda_command(devinfo->dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_SUPP_PCM_SIZE_RATE)); devinfo->supp_pcm_size_rate = res; res = hda_command(devinfo->dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_OUTPUT_AMP_CAP)); devinfo->outamp_cap = res; res = hda_command(devinfo->dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_INPUT_AMP_CAP)); devinfo->inamp_cap = res; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL) device_printf(devinfo->dev, "Ghost widget! nid=%d!\n", i); else { w->devinfo = devinfo; w->nid = i; w->enable = 1; w->selconn = -1; w->pflags = 0; w->ossdev = -1; w->bindas = -1; w->param.eapdbtl = HDA_INVALID; hdaa_widget_parse(w); } } } static void hdaa_audio_postprocess(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; int i; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL) continue; hdaa_widget_postprocess(w); } } static void hdaa_audio_ctl_parse(struct hdaa_devinfo *devinfo) { struct hdaa_audio_ctl *ctls; struct hdaa_widget *w, *cw; int i, j, cnt, max, ocap, icap; int mute, offset, step, size; /* XXX This is redundant */ max = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->param.outamp_cap != 0) max++; if (w->param.inamp_cap != 0) { switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR: case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER: for (j = 0; j < w->nconns; j++) { cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || cw->enable == 0) continue; max++; } break; default: max++; break; } } } devinfo->ctlcnt = max; if (max < 1) return; ctls = (struct hdaa_audio_ctl *)malloc( sizeof(*ctls) * max, M_HDAA, M_ZERO | M_NOWAIT); if (ctls == NULL) { /* Blekh! 
*/ device_printf(devinfo->dev, "unable to allocate ctls!\n"); devinfo->ctlcnt = 0; return; } cnt = 0; for (i = devinfo->startnode; cnt < max && i < devinfo->endnode; i++) { if (cnt >= max) { device_printf(devinfo->dev, "%s: Ctl overflow!\n", __func__); break; } w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; ocap = w->param.outamp_cap; icap = w->param.inamp_cap; if (ocap != 0) { mute = HDA_PARAM_OUTPUT_AMP_CAP_MUTE_CAP(ocap); step = HDA_PARAM_OUTPUT_AMP_CAP_NUMSTEPS(ocap); size = HDA_PARAM_OUTPUT_AMP_CAP_STEPSIZE(ocap); offset = HDA_PARAM_OUTPUT_AMP_CAP_OFFSET(ocap); /*if (offset > step) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, "BUGGY outamp: nid=%d " "[offset=%d > step=%d]\n", w->nid, offset, step); ); offset = step; }*/ ctls[cnt].enable = 1; ctls[cnt].widget = w; ctls[cnt].mute = mute; ctls[cnt].step = step; ctls[cnt].size = size; ctls[cnt].offset = offset; ctls[cnt].left = offset; ctls[cnt].right = offset; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->waspin) ctls[cnt].ndir = HDAA_CTL_IN; else ctls[cnt].ndir = HDAA_CTL_OUT; ctls[cnt++].dir = HDAA_CTL_OUT; } if (icap != 0) { mute = HDA_PARAM_OUTPUT_AMP_CAP_MUTE_CAP(icap); step = HDA_PARAM_OUTPUT_AMP_CAP_NUMSTEPS(icap); size = HDA_PARAM_OUTPUT_AMP_CAP_STEPSIZE(icap); offset = HDA_PARAM_OUTPUT_AMP_CAP_OFFSET(icap); /*if (offset > step) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, "BUGGY inamp: nid=%d " "[offset=%d > step=%d]\n", w->nid, offset, step); ); offset = step; }*/ switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR: case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER: for (j = 0; j < w->nconns; j++) { if (cnt >= max) { device_printf(devinfo->dev, "%s: Ctl overflow!\n", __func__); break; } cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || cw->enable == 0) continue; ctls[cnt].enable = 1; ctls[cnt].widget = w; ctls[cnt].childwidget = cw; ctls[cnt].index = j; ctls[cnt].mute = mute; ctls[cnt].step = step; ctls[cnt].size = size; ctls[cnt].offset = offset; ctls[cnt].left = offset; ctls[cnt].right = offset; ctls[cnt].ndir = HDAA_CTL_IN; ctls[cnt++].dir = HDAA_CTL_IN; } break; default: if (cnt >= max) { device_printf(devinfo->dev, "%s: Ctl overflow!\n", __func__); break; } ctls[cnt].enable = 1; ctls[cnt].widget = w; ctls[cnt].mute = mute; ctls[cnt].step = step; ctls[cnt].size = size; ctls[cnt].offset = offset; ctls[cnt].left = offset; ctls[cnt].right = offset; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) ctls[cnt].ndir = HDAA_CTL_OUT; else ctls[cnt].ndir = HDAA_CTL_IN; ctls[cnt++].dir = HDAA_CTL_IN; break; } } } devinfo->ctl = ctls; } static void hdaa_audio_as_parse(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as; struct hdaa_widget *w; int i, j, cnt, max, type, dir, assoc, seq, first, hpredir; /* Count present associations */ max = 0; for (j = 1; j < 16; j++) { for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (HDA_CONFIG_DEFAULTCONF_ASSOCIATION(w->wclass.pin.config) != j) continue; max++; if (j != 15) /* There could be many 1-pin assocs #15 */ break; } } devinfo->ascnt = max; if (max < 1) return; as = (struct hdaa_audio_as *)malloc( sizeof(*as) * max, M_HDAA, M_ZERO | M_NOWAIT); if (as == NULL) { /* Blekh! 
*/ device_printf(devinfo->dev, "unable to allocate assocs!\n"); devinfo->ascnt = 0; return; } for (i = 0; i < max; i++) { as[i].hpredir = -1; as[i].digital = 0; as[i].num_chans = 1; as[i].location = -1; } /* Scan associations skipping as=0. */ cnt = 0; for (j = 1; j < 16 && cnt < max; j++) { first = 16; hpredir = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; assoc = HDA_CONFIG_DEFAULTCONF_ASSOCIATION(w->wclass.pin.config); seq = HDA_CONFIG_DEFAULTCONF_SEQUENCE(w->wclass.pin.config); if (assoc != j) { continue; } KASSERT(cnt < max, ("%s: Associations owerflow (%d of %d)", __func__, cnt, max)); type = w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK; /* Get pin direction. */ if (type == HDA_CONFIG_DEFAULTCONF_DEVICE_LINE_OUT || type == HDA_CONFIG_DEFAULTCONF_DEVICE_SPEAKER || type == HDA_CONFIG_DEFAULTCONF_DEVICE_HP_OUT || type == HDA_CONFIG_DEFAULTCONF_DEVICE_SPDIF_OUT || type == HDA_CONFIG_DEFAULTCONF_DEVICE_DIGITAL_OTHER_OUT) dir = HDAA_CTL_OUT; else dir = HDAA_CTL_IN; /* If this is a first pin - create new association. */ if (as[cnt].pincnt == 0) { as[cnt].enable = 1; as[cnt].index = j; as[cnt].dir = dir; } if (seq < first) first = seq; /* Check association correctness. */ if (as[cnt].pins[seq] != 0) { device_printf(devinfo->dev, "%s: Duplicate pin %d (%d) " "in association %d! Disabling association.\n", __func__, seq, w->nid, j); as[cnt].enable = 0; } if (dir != as[cnt].dir) { device_printf(devinfo->dev, "%s: Pin %d has wrong " "direction for association %d! Disabling " "association.\n", __func__, w->nid, j); as[cnt].enable = 0; } if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) { as[cnt].digital |= 0x1; if (HDA_PARAM_PIN_CAP_HDMI(w->wclass.pin.cap)) as[cnt].digital |= 0x2; if (HDA_PARAM_PIN_CAP_DP(w->wclass.pin.cap)) as[cnt].digital |= 0x4; } if (as[cnt].location == -1) { as[cnt].location = HDA_CONFIG_DEFAULTCONF_LOCATION(w->wclass.pin.config); } else if (as[cnt].location != HDA_CONFIG_DEFAULTCONF_LOCATION(w->wclass.pin.config)) { as[cnt].location = -2; } /* Headphones with seq=15 may mean redirection. */ if (type == HDA_CONFIG_DEFAULTCONF_DEVICE_HP_OUT && seq == 15) hpredir = 1; as[cnt].pins[seq] = w->nid; as[cnt].pincnt++; /* Association 15 is a multiple unassociated pins. */ if (j == 15) cnt++; } if (j != 15 && as[cnt].pincnt > 0) { if (hpredir && as[cnt].pincnt > 1) as[cnt].hpredir = first; cnt++; } } for (i = 0; i < max; i++) { if (as[i].dir == HDAA_CTL_IN && (as[i].pincnt == 1 || as[i].pins[14] > 0 || as[i].pins[15] > 0)) as[i].mixed = 1; } HDA_BOOTVERBOSE( device_printf(devinfo->dev, "%d associations found:\n", max); for (i = 0; i < max; i++) { device_printf(devinfo->dev, "Association %d (%d) %s%s:\n", i, as[i].index, (as[i].dir == HDAA_CTL_IN)?"in":"out", as[i].enable?"":" (disabled)"); for (j = 0; j < 16; j++) { if (as[i].pins[j] == 0) continue; device_printf(devinfo->dev, " Pin nid=%d seq=%d\n", as[i].pins[j], j); } } ); devinfo->as = as; } /* * Trace path from DAC to pin. 
*/ static nid_t hdaa_audio_trace_dac(struct hdaa_devinfo *devinfo, int as, int seq, nid_t nid, int dupseq, int min, int only, int depth) { struct hdaa_widget *w; int i, im = -1; nid_t m = 0, ret; if (depth > HDA_PARSE_MAXDEPTH) return (0); w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return (0); HDA_BOOTHVERBOSE( if (!only) { device_printf(devinfo->dev, " %*stracing via nid %d\n", depth + 1, "", w->nid); } ); /* Use only unused widgets */ if (w->bindas >= 0 && w->bindas != as) { HDA_BOOTHVERBOSE( if (!only) { device_printf(devinfo->dev, " %*snid %d busy by association %d\n", depth + 1, "", w->nid, w->bindas); } ); return (0); } if (dupseq < 0) { if (w->bindseqmask != 0) { HDA_BOOTHVERBOSE( if (!only) { device_printf(devinfo->dev, " %*snid %d busy by seqmask %x\n", depth + 1, "", w->nid, w->bindseqmask); } ); return (0); } } else { /* If this is headphones - allow duplicate first pin. */ if (w->bindseqmask != 0 && (w->bindseqmask & (1 << dupseq)) == 0) { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d busy by seqmask %x\n", depth + 1, "", w->nid, w->bindseqmask); ); return (0); } } switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT: /* Do not traverse input. AD1988 has digital monitor for which we are not ready. */ break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT: /* If we are tracing HP take only dac of first pin. */ if ((only == 0 || only == w->nid) && (w->nid >= min) && (dupseq < 0 || w->nid == devinfo->as[as].dacs[0][dupseq])) m = w->nid; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX: if (depth > 0) break; /* Fall */ default: /* Find reachable DACs with smallest nid respecting constraints. */ for (i = 0; i < w->nconns; i++) { if (w->connsenable[i] == 0) continue; if (w->selconn != -1 && w->selconn != i) continue; if ((ret = hdaa_audio_trace_dac(devinfo, as, seq, w->conns[i], dupseq, min, only, depth + 1)) != 0) { if (m == 0 || ret < m) { m = ret; im = i; } if (only || dupseq >= 0) break; } } if (im >= 0 && only && ((w->nconns > 1 && w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR)) w->selconn = im; break; } if (m && only) { w->bindas = as; w->bindseqmask |= (1 << seq); } HDA_BOOTHVERBOSE( if (!only) { device_printf(devinfo->dev, " %*snid %d returned %d\n", depth + 1, "", w->nid, m); } ); return (m); } /* * Trace path from widget to ADC. 
*/ static nid_t hdaa_audio_trace_adc(struct hdaa_devinfo *devinfo, int as, int seq, nid_t nid, int mixed, int min, int only, int depth, int *length, int onlylength) { struct hdaa_widget *w, *wc; int i, j, im, lm = HDA_PARSE_MAXDEPTH; nid_t m = 0, ret; if (depth > HDA_PARSE_MAXDEPTH) return (0); w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return (0); HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*stracing via nid %d\n", depth + 1, "", w->nid); ); /* Use only unused widgets */ if (w->bindas >= 0 && w->bindas != as) { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d busy by association %d\n", depth + 1, "", w->nid, w->bindas); ); return (0); } if (!mixed && w->bindseqmask != 0) { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d busy by seqmask %x\n", depth + 1, "", w->nid, w->bindseqmask); ); return (0); } switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT: if ((only == 0 || only == w->nid) && (w->nid >= min) && (onlylength == 0 || onlylength == depth)) { m = w->nid; *length = depth; } break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX: if (depth > 0) break; /* Fall */ default: /* Try to find reachable ADCs with the specified nid. */ for (j = devinfo->startnode; j < devinfo->endnode; j++) { wc = hdaa_widget_get(devinfo, j); if (wc == NULL || wc->enable == 0) continue; im = -1; for (i = 0; i < wc->nconns; i++) { if (wc->connsenable[i] == 0) continue; if (wc->conns[i] != nid) continue; if ((ret = hdaa_audio_trace_adc(devinfo, as, seq, j, mixed, min, only, depth + 1, length, onlylength)) != 0) { if (m == 0 || ret < m || (ret == m && *length < lm)) { m = ret; im = i; lm = *length; } else *length = lm; if (only) break; } } if (im >= 0 && only && ((wc->nconns > 1 && wc->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) || wc->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR)) wc->selconn = im; } break; } if (m && only) { w->bindas = as; w->bindseqmask |= (1 << seq); } HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d returned %d\n", depth + 1, "", w->nid, m); ); return (m); } /* * Erase trace path of the specified association. */ static void hdaa_audio_undo_trace(struct hdaa_devinfo *devinfo, int as, int seq) { struct hdaa_widget *w; int i; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->bindas == as) { if (seq >= 0) { w->bindseqmask &= ~(1 << seq); if (w->bindseqmask == 0) { w->bindas = -1; w->selconn = -1; } } else { w->bindas = -1; w->bindseqmask = 0; w->selconn = -1; } } } } /* * Trace association path from DAC to output */ static int hdaa_audio_trace_as_out(struct hdaa_devinfo *devinfo, int as, int seq) { struct hdaa_audio_as *ases = devinfo->as; int i, hpredir; nid_t min, res; /* Find the next pin */ for (i = seq; i < 16 && ases[as].pins[i] == 0; i++) ; /* Check if there are any left. If not - we have succeeded. */ if (i == 16) return (1); hpredir = (i == 15 && ases[as].fakeredir == 0)?ases[as].hpredir:-1; min = 0; do { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Tracing pin %d with min nid %d", ases[as].pins[i], min); if (hpredir >= 0) printf(" and hpredir %d", hpredir); printf("\n"); ); /* Trace this pin taking min nid into account. */ res = hdaa_audio_trace_dac(devinfo, as, i, ases[as].pins[i], hpredir, min, 0, 0); if (res == 0) { /* If we failed - return to the previous pin and redo it.
*/ HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Unable to trace pin %d seq %d with min " "nid %d", ases[as].pins[i], i, min); if (hpredir >= 0) printf(" and hpredir %d", hpredir); printf("\n"); ); return (0); } HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Pin %d traced to DAC %d", ases[as].pins[i], res); if (hpredir >= 0) printf(" and hpredir %d", hpredir); if (ases[as].fakeredir) printf(" with fake redirection"); printf("\n"); ); /* Trace again to mark the path */ hdaa_audio_trace_dac(devinfo, as, i, ases[as].pins[i], hpredir, min, res, 0); ases[as].dacs[0][i] = res; /* We succeeded, so call next. */ if (hdaa_audio_trace_as_out(devinfo, as, i + 1)) return (1); /* If the next one failed, we should retry with the next min */ hdaa_audio_undo_trace(devinfo, as, i); ases[as].dacs[0][i] = 0; min = res + 1; } while (1); } /* * Check equivalency of two DACs. */ static int hdaa_audio_dacs_equal(struct hdaa_widget *w1, struct hdaa_widget *w2) { struct hdaa_devinfo *devinfo = w1->devinfo; struct hdaa_widget *w3; int i, j, c1, c2; if (memcmp(&w1->param, &w2->param, sizeof(w1->param))) return (0); for (i = devinfo->startnode; i < devinfo->endnode; i++) { w3 = hdaa_widget_get(devinfo, i); if (w3 == NULL || w3->enable == 0) continue; if (w3->bindas != w1->bindas) continue; if (w3->nconns == 0) continue; c1 = c2 = -1; for (j = 0; j < w3->nconns; j++) { if (w3->connsenable[j] == 0) continue; if (w3->conns[j] == w1->nid) c1 = j; if (w3->conns[j] == w2->nid) c2 = j; } if (c1 < 0) continue; if (c2 < 0) return (0); if (w3->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) return (0); } return (1); } /* * Check equivalency of two ADCs. */ static int hdaa_audio_adcs_equal(struct hdaa_widget *w1, struct hdaa_widget *w2) { struct hdaa_devinfo *devinfo = w1->devinfo; struct hdaa_widget *w3, *w4; int i; if (memcmp(&w1->param, &w2->param, sizeof(w1->param))) return (0); if (w1->nconns != 1 || w2->nconns != 1) return (0); if (w1->conns[0] == w2->conns[0]) return (1); w3 = hdaa_widget_get(devinfo, w1->conns[0]); if (w3 == NULL || w3->enable == 0) return (0); w4 = hdaa_widget_get(devinfo, w2->conns[0]); if (w4 == NULL || w4->enable == 0) return (0); if (w3->bindas == w4->bindas && w3->bindseqmask == w4->bindseqmask) return (1); if (w4->bindas >= 0) return (0); if (w3->type != w4->type) return (0); if (memcmp(&w3->param, &w4->param, sizeof(w3->param))) return (0); if (w3->nconns != w4->nconns) return (0); for (i = 0; i < w3->nconns; i++) { if (w3->conns[i] != w4->conns[i]) return (0); } return (1); } /* * Look for an equivalent DAC/ADC to implement a second channel. */ static void hdaa_audio_adddac(struct hdaa_devinfo *devinfo, int asid) { struct hdaa_audio_as *as = &devinfo->as[asid]; struct hdaa_widget *w1, *w2; int i, pos; nid_t nid1, nid2; HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Looking for additional %sC " "for association %d (%d)\n", (as->dir == HDAA_CTL_OUT) ? "DA" : "AD", asid, as->index); ); /* Find the existing DAC position and return if more than one is found. */
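/*
 * Sketch of what follows: if the association was traced to a single
 * DAC (or ADC), look for a so-far unbound converter that is exactly
 * equivalent to it and register it as an additional channel, so more
 * pcm channels can be offered on the same set of pins.
 */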
*/ pos = -1; for (i = 0; i < 16; i++) { if (as->dacs[0][i] <= 0) continue; if (pos >= 0 && as->dacs[0][i] != as->dacs[0][pos]) return; pos = i; } nid1 = as->dacs[0][pos]; w1 = hdaa_widget_get(devinfo, nid1); w2 = NULL; for (nid2 = devinfo->startnode; nid2 < devinfo->endnode; nid2++) { w2 = hdaa_widget_get(devinfo, nid2); if (w2 == NULL || w2->enable == 0) continue; if (w2->bindas >= 0) continue; if (w1->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT) { if (w2->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT) continue; if (hdaa_audio_dacs_equal(w1, w2)) break; } else { if (w2->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) continue; if (hdaa_audio_adcs_equal(w1, w2)) break; } } if (nid2 >= devinfo->endnode) return; w2->bindas = w1->bindas; w2->bindseqmask = w1->bindseqmask; if (w1->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, " ADC %d considered equal to ADC %d\n", nid2, nid1); ); w1 = hdaa_widget_get(devinfo, w1->conns[0]); w2 = hdaa_widget_get(devinfo, w2->conns[0]); w2->bindas = w1->bindas; w2->bindseqmask = w1->bindseqmask; } else { HDA_BOOTVERBOSE( device_printf(devinfo->dev, " DAC %d considered equal to DAC %d\n", nid2, nid1); ); } for (i = 0; i < 16; i++) { if (as->dacs[0][i] <= 0) continue; as->dacs[as->num_chans][i] = nid2; } as->num_chans++; } /* * Trace association path from input to ADC */ static int hdaa_audio_trace_as_in(struct hdaa_devinfo *devinfo, int as) { struct hdaa_audio_as *ases = devinfo->as; struct hdaa_widget *w; int i, j, k, length; for (j = devinfo->startnode; j < devinfo->endnode; j++) { w = hdaa_widget_get(devinfo, j); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) continue; if (w->bindas >= 0 && w->bindas != as) continue; /* Find next pin */ for (i = 0; i < 16; i++) { if (ases[as].pins[i] == 0) continue; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Tracing pin %d to ADC %d\n", ases[as].pins[i], j); ); /* Trace this pin taking goal into account. */ if (hdaa_audio_trace_adc(devinfo, as, i, ases[as].pins[i], 1, 0, j, 0, &length, 0) == 0) { /* If we failed - return to previous and redo it. */ HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Unable to trace pin %d to ADC %d, undo traces\n", ases[as].pins[i], j); ); hdaa_audio_undo_trace(devinfo, as, -1); for (k = 0; k < 16; k++) ases[as].dacs[0][k] = 0; break; } HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Pin %d traced to ADC %d\n", ases[as].pins[i], j); ); ases[as].dacs[0][i] = j; } if (i == 16) return (1); } return (0); } /* * Trace association path from input to multiple ADCs */ static int hdaa_audio_trace_as_in_mch(struct hdaa_devinfo *devinfo, int as, int seq) { struct hdaa_audio_as *ases = devinfo->as; int i, length; nid_t min, res; /* Find next pin */ for (i = seq; i < 16 && ases[as].pins[i] == 0; i++) ; /* Check if there is no any left. If so - we succeeded. */ if (i == 16) return (1); min = 0; do { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Tracing pin %d with min nid %d", ases[as].pins[i], min); printf("\n"); ); /* Trace this pin taking min nid into account. */ res = hdaa_audio_trace_adc(devinfo, as, i, ases[as].pins[i], 0, min, 0, 0, &length, 0); if (res == 0) { /* If we failed - return to previous and redo it. 
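As in the DAC case above, returning zero makes the caller undo its own trace and retry the previous pin with the next minimum ADC nid.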
*/ HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Unable to trace pin %d seq %d with min " "nid %d", ases[as].pins[i], i, min); printf("\n"); ); return (0); } HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Pin %d traced to ADC %d\n", ases[as].pins[i], res); ); /* Trace again to mark the path */ hdaa_audio_trace_adc(devinfo, as, i, ases[as].pins[i], 0, min, res, 0, &length, length); ases[as].dacs[0][i] = res; /* We succeeded, so call next. */ if (hdaa_audio_trace_as_in_mch(devinfo, as, i + 1)) return (1); /* If next failed, we should retry with next min */ hdaa_audio_undo_trace(devinfo, as, i); ases[as].dacs[0][i] = 0; min = res + 1; } while (1); } /* * Trace input monitor path from mixer to output association. */ static int hdaa_audio_trace_to_out(struct hdaa_devinfo *devinfo, nid_t nid, int depth) { struct hdaa_audio_as *ases = devinfo->as; struct hdaa_widget *w, *wc; int i, j; nid_t res = 0; if (depth > HDA_PARSE_MAXDEPTH) return (0); w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return (0); HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*stracing via nid %d\n", depth + 1, "", w->nid); ); /* Use only unused widgets */ if (depth > 0 && w->bindas != -1) { if (w->bindas < 0 || ases[w->bindas].dir == HDAA_CTL_OUT) { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d found output association %d\n", depth + 1, "", w->nid, w->bindas); ); if (w->bindas >= 0) w->pflags |= HDAA_ADC_MONITOR; return (1); } else { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d busy by input association %d\n", depth + 1, "", w->nid, w->bindas); ); return (0); } } switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT: /* Do not traverse input. AD1988 has digital monitor for which we are not ready. */ break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX: if (depth > 0) break; /* Fall */ default: /* Try to find reachable ADCs with specified nid. */ for (j = devinfo->startnode; j < devinfo->endnode; j++) { wc = hdaa_widget_get(devinfo, j); if (wc == NULL || wc->enable == 0) continue; for (i = 0; i < wc->nconns; i++) { if (wc->connsenable[i] == 0) continue; if (wc->conns[i] != nid) continue; if (hdaa_audio_trace_to_out(devinfo, j, depth + 1) != 0) { res = 1; if (wc->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR && wc->selconn == -1) wc->selconn = i; } } } break; } if (res && w->bindas == -1) w->bindas = -2; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d returned %d\n", depth + 1, "", w->nid, res); ); return (res); } /* * Trace extra associations (beeper, monitor) */ static void hdaa_audio_trace_as_extra(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w; int j; /* Input monitor */ /* Find mixer associated with input, but supplying signal for output associations. Hope it will be input monitor. */ HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Tracing input monitor\n"); ); for (j = devinfo->startnode; j < devinfo->endnode; j++) { w = hdaa_widget_get(devinfo, j); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) continue; if (w->bindas < 0 || as[w->bindas].dir != HDAA_CTL_IN) continue; HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Tracing nid %d to out\n", j); ); if (hdaa_audio_trace_to_out(devinfo, w->nid, 0)) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, " nid %d is input monitor\n", w->nid); ); w->ossdev = SOUND_MIXER_IMIX; } } /* Other inputs monitor */ /* Find input pins supplying signal for output associations. Hope it will be input monitoring. 
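Unlike the mixer case above, such pins get no dedicated OSS mixer device; a successful trace only marks the path so that the pruning passes below keep it alive.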
*/ HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Tracing other input monitors\n"); ); for (j = devinfo->startnode; j < devinfo->endnode; j++) { w = hdaa_widget_get(devinfo, j); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (w->bindas < 0 || as[w->bindas].dir != HDAA_CTL_IN) continue; HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Tracing nid %d to out\n", j); ); if (hdaa_audio_trace_to_out(devinfo, w->nid, 0)) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, " nid %d is input monitor\n", w->nid); ); } } /* Beeper */ HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Tracing beeper\n"); ); for (j = devinfo->startnode; j < devinfo->endnode; j++) { w = hdaa_widget_get(devinfo, j); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_BEEP_WIDGET) continue; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Tracing nid %d to out\n", j); ); if (hdaa_audio_trace_to_out(devinfo, w->nid, 0)) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, " nid %d traced to out\n", j); ); } w->bindas = -2; } } /* * Bind associations to PCM channels */ static void hdaa_audio_bind_as(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; int i, j, cnt = 0, free; for (j = 0; j < devinfo->ascnt; j++) { if (as[j].enable) cnt += as[j].num_chans; } if (devinfo->num_chans == 0) { devinfo->chans = (struct hdaa_chan *)malloc( sizeof(struct hdaa_chan) * cnt, M_HDAA, M_ZERO | M_NOWAIT); if (devinfo->chans == NULL) { device_printf(devinfo->dev, "Channels memory allocation failed!\n"); return; } } else { devinfo->chans = (struct hdaa_chan *)realloc(devinfo->chans, sizeof(struct hdaa_chan) * (devinfo->num_chans + cnt), M_HDAA, M_ZERO | M_NOWAIT); if (devinfo->chans == NULL) { devinfo->num_chans = 0; device_printf(devinfo->dev, "Channels memory allocation failed!\n"); return; } /* Fixup relative pointers after realloc */ for (j = 0; j < devinfo->num_chans; j++) devinfo->chans[j].caps.fmtlist = devinfo->chans[j].fmtlist; } free = devinfo->num_chans; devinfo->num_chans += cnt; for (j = free; j < free + cnt; j++) { devinfo->chans[j].devinfo = devinfo; devinfo->chans[j].as = -1; } /* Assign associations in order of their numbers. */ for (j = 0; j < devinfo->ascnt; j++) { if (as[j].enable == 0) continue; for (i = 0; i < as[j].num_chans; i++) { devinfo->chans[free].as = j; devinfo->chans[free].asindex = i; devinfo->chans[free].dir = (as[j].dir == HDAA_CTL_IN) ? PCMDIR_REC : PCMDIR_PLAY; hdaa_pcmchannel_setup(&devinfo->chans[free]); as[j].chans[i] = free; free++; } } } static void hdaa_audio_disable_nonaudio(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; int i; /* Disable power and volume widgets. */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_POWER_WIDGET || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_VOLUME_WIDGET) { w->enable = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling nid %d due to its" " non-audio type.\n", w->nid); ); } } } static void hdaa_audio_disable_useless(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w, *cw; struct hdaa_audio_ctl *ctl; int done, found, i, j, k; /* Disable useless pins.
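A pin whose config reports connectivity "None" or association number zero can never carry a signal, so it is dropped up front; the loop below then iteratively disables whatever became unreachable as a result.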
*/ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) { if ((w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK) == HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_NONE) { w->enable = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling pin nid %d due" " to None connectivity.\n", w->nid); ); } else if ((w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_ASSOCIATION_MASK) == 0) { w->enable = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling unassociated" " pin nid %d.\n", w->nid); ); } } } do { done = 1; /* Disable and mute controls for disabled widgets. */ i = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { if (ctl->enable == 0) continue; if (ctl->widget->enable == 0 || (ctl->childwidget != NULL && ctl->childwidget->enable == 0)) { ctl->forcemute = 1; ctl->muted = HDAA_AMP_MUTE_ALL; ctl->left = 0; ctl->right = 0; ctl->enable = 0; if (ctl->ndir == HDAA_CTL_IN) ctl->widget->connsenable[ctl->index] = 0; done = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling ctl %d nid %d cnid %d due" " to disabled widget.\n", i, ctl->widget->nid, (ctl->childwidget != NULL)? ctl->childwidget->nid:-1); ); } } /* Disable useless widgets. */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; /* Disable inputs with disabled child widgets. */ for (j = 0; j < w->nconns; j++) { if (w->connsenable[j]) { cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || cw->enable == 0) { w->connsenable[j] = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling nid %d connection %d due" " to disabled child widget.\n", i, j); ); } } } if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR && w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) continue; /* Disable mixers and selectors without inputs. */ found = 0; for (j = 0; j < w->nconns; j++) { if (w->connsenable[j]) { found = 1; break; } } if (found == 0) { w->enable = 0; done = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling nid %d due to all it's" " inputs disabled.\n", w->nid); ); } /* Disable nodes without consumers. */ if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR && w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) continue; found = 0; for (k = devinfo->startnode; k < devinfo->endnode; k++) { cw = hdaa_widget_get(devinfo, k); if (cw == NULL || cw->enable == 0) continue; for (j = 0; j < cw->nconns; j++) { if (cw->connsenable[j] && cw->conns[j] == i) { found = 1; break; } } } if (found == 0) { w->enable = 0; done = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling nid %d due to all it's" " consumers disabled.\n", w->nid); ); } } } while (done == 0); } static void hdaa_audio_disable_unas(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w, *cw; struct hdaa_audio_ctl *ctl; int i, j, k; /* Disable unassosiated widgets. */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->bindas == -1) { w->enable = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling unassociated nid %d.\n", w->nid); ); } } /* Disable input connections on input pin and * output on output. 
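* A pin serves its association in one direction only, so the amplifier
* and the routes of the unused direction are muted and cut here, which
* keeps unused paths from leaking signal.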
*/ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (w->bindas < 0) continue; if (as[w->bindas].dir == HDAA_CTL_IN) { for (j = 0; j < w->nconns; j++) { if (w->connsenable[j] == 0) continue; w->connsenable[j] = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling connection to input pin " "nid %d conn %d.\n", i, j); ); } ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, -1, 1); if (ctl && ctl->enable) { ctl->forcemute = 1; ctl->muted = HDAA_AMP_MUTE_ALL; ctl->left = 0; ctl->right = 0; ctl->enable = 0; } } else { ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_OUT, -1, 1); if (ctl && ctl->enable) { ctl->forcemute = 1; ctl->muted = HDAA_AMP_MUTE_ALL; ctl->left = 0; ctl->right = 0; ctl->enable = 0; } for (k = devinfo->startnode; k < devinfo->endnode; k++) { cw = hdaa_widget_get(devinfo, k); if (cw == NULL || cw->enable == 0) continue; for (j = 0; j < cw->nconns; j++) { if (cw->connsenable[j] && cw->conns[j] == i) { cw->connsenable[j] = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling connection from output pin " "nid %d conn %d cnid %d.\n", k, j, i); ); if (cw->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && cw->nconns > 1) continue; ctl = hdaa_audio_ctl_amp_get(devinfo, k, HDAA_CTL_IN, j, 1); if (ctl && ctl->enable) { ctl->forcemute = 1; ctl->muted = HDAA_AMP_MUTE_ALL; ctl->left = 0; ctl->right = 0; ctl->enable = 0; } } } } } } } static void hdaa_audio_disable_notselected(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w; int i, j; /* On the playback path we can safely disable all unselected inputs. */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->nconns <= 1) continue; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) continue; if (w->bindas < 0 || as[w->bindas].dir == HDAA_CTL_IN) continue; for (j = 0; j < w->nconns; j++) { if (w->connsenable[j] == 0) continue; if (w->selconn < 0 || w->selconn == j) continue; w->connsenable[j] = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling unselected connection " "nid %d conn %d.\n", i, j); ); } } } static void hdaa_audio_disable_crossas(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *ases = devinfo->as; struct hdaa_widget *w, *cw; struct hdaa_audio_ctl *ctl; int i, j; /* Disable cross-association and unwanted cross-channel connections. */ /* ... using selectors */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->nconns <= 1) continue; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) continue; /* Allow any -> mix */ if (w->bindas == -2) continue; for (j = 0; j < w->nconns; j++) { if (w->connsenable[j] == 0) continue; cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || cw->enable == 0) continue; /* Allow mix -> out. */ if (cw->bindas == -2 && w->bindas >= 0 && ases[w->bindas].dir == HDAA_CTL_OUT) continue; /* Allow mix -> mixed-in. */ if (cw->bindas == -2 && w->bindas >= 0 && ases[w->bindas].mixed) continue; /* Allow in -> mix. */ if ((w->pflags & HDAA_ADC_MONITOR) && cw->bindas >= 0 && ases[cw->bindas].dir == HDAA_CTL_IN) continue; /* Allow if they have common as/seqs.
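This is the last of the allowances started above; a connection matching none of them is severed just below, so only same-association paths sharing a sequence bit and the listed mixer/monitor cases survive.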
*/ if (w->bindas == cw->bindas && (w->bindseqmask & cw->bindseqmask) != 0) continue; w->connsenable[j] = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling cross-association connection " "nid %d conn %d cnid %d.\n", i, j, cw->nid); ); } } /* ... using controls */ i = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { if (ctl->enable == 0 || ctl->childwidget == NULL) continue; /* Allow any -> mix */ if (ctl->widget->bindas == -2) continue; /* Allow mix -> out. */ if (ctl->childwidget->bindas == -2 && ctl->widget->bindas >= 0 && ases[ctl->widget->bindas].dir == HDAA_CTL_OUT) continue; /* Allow mix -> mixed-in. */ if (ctl->childwidget->bindas == -2 && ctl->widget->bindas >= 0 && ases[ctl->widget->bindas].mixed) continue; /* Allow in -> mix. */ if ((ctl->widget->pflags & HDAA_ADC_MONITOR) && ctl->childwidget->bindas >= 0 && ases[ctl->childwidget->bindas].dir == HDAA_CTL_IN) continue; /* Allow if they have common as/seqs. */ if (ctl->widget->bindas == ctl->childwidget->bindas && (ctl->widget->bindseqmask & ctl->childwidget->bindseqmask) != 0) continue; ctl->forcemute = 1; ctl->muted = HDAA_AMP_MUTE_ALL; ctl->left = 0; ctl->right = 0; ctl->enable = 0; if (ctl->ndir == HDAA_CTL_IN) ctl->widget->connsenable[ctl->index] = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling cross-association connection " "ctl %d nid %d cnid %d.\n", i, ctl->widget->nid, ctl->childwidget->nid); ); } } /* * Find controls to control amplification for source and calculate possible * amplification range. */ static int hdaa_audio_ctl_source_amp(struct hdaa_devinfo *devinfo, nid_t nid, int index, int ossdev, int ctlable, int depth, int *minamp, int *maxamp) { struct hdaa_widget *w, *wc; struct hdaa_audio_ctl *ctl; int i, j, conns = 0, tminamp, tmaxamp, cminamp, cmaxamp, found = 0; if (depth > HDA_PARSE_MAXDEPTH) return (found); w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return (found); /* Count number of active inputs. */ if (depth > 0) { for (j = 0; j < w->nconns; j++) { if (!w->connsenable[j]) continue; conns++; } } /* If this is not the first step, use the input mixer. Pins have a common input ctl, so care must be taken. */ if (depth > 0 && ctlable && (conns == 1 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)) { ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, index, 1); if (ctl) { ctl->ossmask |= (1 << ossdev); found++; if (*minamp == *maxamp) { *minamp += MINQDB(ctl); *maxamp += MAXQDB(ctl); } } } /* If the widget has its own ossdev, do not traverse it. It will be traversed on its own. */ if (w->ossdev >= 0 && depth > 0) return (found); /* We must not traverse the pin */ if ((w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) && depth > 0) return (found); /* Record that this widget exports such a signal. */ w->ossmask |= (1 << ossdev); /* * If signals are mixed, we can't assign controls farther. * Ignore this on depth zero. The caller must know why.
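* Past a mixer with several active inputs a single amplifier would
* change the other sources as well, so from here on only the ossmask
* bookkeeping continues and no more controls are grabbed.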
*/ if (conns > 1 && w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) ctlable = 0; if (ctlable) { ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_OUT, -1, 1); if (ctl) { ctl->ossmask |= (1 << ossdev); found++; if (*minamp == *maxamp) { *minamp += MINQDB(ctl); *maxamp += MAXQDB(ctl); } } } cminamp = cmaxamp = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { wc = hdaa_widget_get(devinfo, i); if (wc == NULL || wc->enable == 0) continue; for (j = 0; j < wc->nconns; j++) { if (wc->connsenable[j] && wc->conns[j] == nid) { tminamp = tmaxamp = 0; found += hdaa_audio_ctl_source_amp(devinfo, wc->nid, j, ossdev, ctlable, depth + 1, &tminamp, &tmaxamp); if (cminamp == 0 && cmaxamp == 0) { cminamp = tminamp; cmaxamp = tmaxamp; } else if (tminamp != tmaxamp) { cminamp = imax(cminamp, tminamp); cmaxamp = imin(cmaxamp, tmaxamp); } } } } if (*minamp == *maxamp && cminamp < cmaxamp) { *minamp += cminamp; *maxamp += cmaxamp; } return (found); } /* * Find controls to control amplification for destination and calculate * possible amplification range. */ static int hdaa_audio_ctl_dest_amp(struct hdaa_devinfo *devinfo, nid_t nid, int index, int ossdev, int depth, int *minamp, int *maxamp) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w, *wc; struct hdaa_audio_ctl *ctl; int i, j, consumers, tminamp, tmaxamp, cminamp, cmaxamp, found = 0; if (depth > HDA_PARSE_MAXDEPTH) return (found); w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return (found); if (depth > 0) { /* If this node produces output for several consumers, we can't touch it. */ consumers = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { wc = hdaa_widget_get(devinfo, i); if (wc == NULL || wc->enable == 0) continue; for (j = 0; j < wc->nconns; j++) { if (wc->connsenable[j] && wc->conns[j] == nid) consumers++; } } /* The only exception is if real HP redirection is configured and this is a duplication point. XXX: Actually the exception is not completely correct. XXX: The duplication point check is not perfect. */ if ((consumers == 2 && (w->bindas < 0 || as[w->bindas].hpredir < 0 || as[w->bindas].fakeredir || (w->bindseqmask & (1 << 15)) == 0)) || consumers > 2) return (found); /* Else use its output mixer.
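Grabbing the output amplifier here is safe: the consumer check above has just guaranteed that this node feeds only our path (or is the tolerated headphone redirection split point).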
*/ ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_OUT, -1, 1); if (ctl) { ctl->ossmask |= (1 << ossdev); found++; if (*minamp == *maxamp) { *minamp += MINQDB(ctl); *maxamp += MAXQDB(ctl); } } } /* We must not traverse pin */ if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && depth > 0) return (found); cminamp = cmaxamp = 0; for (i = 0; i < w->nconns; i++) { if (w->connsenable[i] == 0) continue; if (index >= 0 && i != index) continue; tminamp = tmaxamp = 0; ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, i, 1); if (ctl) { ctl->ossmask |= (1 << ossdev); found++; if (*minamp == *maxamp) { tminamp += MINQDB(ctl); tmaxamp += MAXQDB(ctl); } } found += hdaa_audio_ctl_dest_amp(devinfo, w->conns[i], -1, ossdev, depth + 1, &tminamp, &tmaxamp); if (cminamp == 0 && cmaxamp == 0) { cminamp = tminamp; cmaxamp = tmaxamp; } else if (tminamp != tmaxamp) { cminamp = imax(cminamp, tminamp); cmaxamp = imin(cmaxamp, tmaxamp); } } if (*minamp == *maxamp && cminamp < cmaxamp) { *minamp += cminamp; *maxamp += cmaxamp; } return (found); } /* * Assign OSS names to sound sources */ static void hdaa_audio_assign_names(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w; int i, j; int type = -1, use, used = 0; static const int types[7][13] = { { SOUND_MIXER_LINE, SOUND_MIXER_LINE1, SOUND_MIXER_LINE2, SOUND_MIXER_LINE3, -1 }, /* line */ { SOUND_MIXER_MONITOR, SOUND_MIXER_MIC, -1 }, /* int mic */ { SOUND_MIXER_MIC, SOUND_MIXER_MONITOR, -1 }, /* ext mic */ { SOUND_MIXER_CD, -1 }, /* cd */ { SOUND_MIXER_SPEAKER, -1 }, /* speaker */ { SOUND_MIXER_DIGITAL1, SOUND_MIXER_DIGITAL2, SOUND_MIXER_DIGITAL3, -1 }, /* digital */ { SOUND_MIXER_LINE, SOUND_MIXER_LINE1, SOUND_MIXER_LINE2, SOUND_MIXER_LINE3, SOUND_MIXER_PHONEIN, SOUND_MIXER_PHONEOUT, SOUND_MIXER_VIDEO, SOUND_MIXER_RADIO, SOUND_MIXER_DIGITAL1, SOUND_MIXER_DIGITAL2, SOUND_MIXER_DIGITAL3, SOUND_MIXER_MONITOR, -1 } /* others */ }; /* Surely known names */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->bindas == -1) continue; use = -1; switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX: if (as[w->bindas].dir == HDAA_CTL_OUT) break; type = -1; switch (w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK) { case HDA_CONFIG_DEFAULTCONF_DEVICE_LINE_IN: type = 0; break; case HDA_CONFIG_DEFAULTCONF_DEVICE_MIC_IN: if ((w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK) == HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_JACK) break; type = 1; break; case HDA_CONFIG_DEFAULTCONF_DEVICE_CD: type = 3; break; case HDA_CONFIG_DEFAULTCONF_DEVICE_SPEAKER: type = 4; break; case HDA_CONFIG_DEFAULTCONF_DEVICE_SPDIF_IN: case HDA_CONFIG_DEFAULTCONF_DEVICE_DIGITAL_OTHER_IN: type = 5; break; } if (type == -1) break; j = 0; while (types[type][j] >= 0 && (used & (1 << types[type][j])) != 0) { j++; } if (types[type][j] >= 0) use = types[type][j]; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT: use = SOUND_MIXER_PCM; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_BEEP_WIDGET: use = SOUND_MIXER_SPEAKER; break; default: break; } if (use >= 0) { w->ossdev = use; used |= (1 << use); } } /* Semi-known names */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->ossdev >= 0) continue; if (w->bindas == -1) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (as[w->bindas].dir == HDAA_CTL_OUT) 
continue; type = -1; switch (w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK) { case HDA_CONFIG_DEFAULTCONF_DEVICE_LINE_OUT: case HDA_CONFIG_DEFAULTCONF_DEVICE_SPEAKER: case HDA_CONFIG_DEFAULTCONF_DEVICE_HP_OUT: case HDA_CONFIG_DEFAULTCONF_DEVICE_AUX: type = 0; break; case HDA_CONFIG_DEFAULTCONF_DEVICE_MIC_IN: type = 2; break; case HDA_CONFIG_DEFAULTCONF_DEVICE_SPDIF_OUT: case HDA_CONFIG_DEFAULTCONF_DEVICE_DIGITAL_OTHER_OUT: type = 5; break; } if (type == -1) break; j = 0; while (types[type][j] >= 0 && (used & (1 << types[type][j])) != 0) { j++; } if (types[type][j] >= 0) { w->ossdev = types[type][j]; used |= (1 << types[type][j]); } } /* Others */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->ossdev >= 0) continue; if (w->bindas == -1) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (as[w->bindas].dir == HDAA_CTL_OUT) continue; j = 0; while (types[6][j] >= 0 && (used & (1 << types[6][j])) != 0) { j++; } if (types[6][j] >= 0) { w->ossdev = types[6][j]; used |= (1 << types[6][j]); } } } static void hdaa_audio_build_tree(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; int j, res; /* Trace all associations in order of their numbers. */ for (j = 0; j < devinfo->ascnt; j++) { if (as[j].enable == 0) continue; HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Tracing association %d (%d)\n", j, as[j].index); ); if (as[j].dir == HDAA_CTL_OUT) { retry: res = hdaa_audio_trace_as_out(devinfo, j, 0); if (res == 0 && as[j].hpredir >= 0 && as[j].fakeredir == 0) { /* If CODEC can't do analog HP redirection try to make it using one more DAC. */ as[j].fakeredir = 1; goto retry; } } else if (as[j].mixed) res = hdaa_audio_trace_as_in(devinfo, j); else res = hdaa_audio_trace_as_in_mch(devinfo, j, 0); if (res) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Association %d (%d) trace succeeded\n", j, as[j].index); ); } else { HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Association %d (%d) trace failed\n", j, as[j].index); ); as[j].enable = 0; } } /* Look for additional DACs/ADCs. */ for (j = 0; j < devinfo->ascnt; j++) { if (as[j].enable == 0) continue; hdaa_audio_adddac(devinfo, j); } /* Trace mixer and beeper pseudo associations. */ hdaa_audio_trace_as_extra(devinfo); } /* * Store in pdevinfo new data about whether and how we can control signal * for OSS device to/from specified widget. */ static void hdaa_adjust_amp(struct hdaa_widget *w, int ossdev, int found, int minamp, int maxamp) { struct hdaa_devinfo *devinfo = w->devinfo; struct hdaa_pcm_devinfo *pdevinfo; if (w->bindas >= 0) pdevinfo = devinfo->as[w->bindas].pdevinfo; else pdevinfo = &devinfo->devs[0]; if (found) pdevinfo->ossmask |= (1 << ossdev); if (minamp == 0 && maxamp == 0) return; if (pdevinfo->minamp[ossdev] == 0 && pdevinfo->maxamp[ossdev] == 0) { pdevinfo->minamp[ossdev] = minamp; pdevinfo->maxamp[ossdev] = maxamp; } else { pdevinfo->minamp[ossdev] = imax(pdevinfo->minamp[ossdev], minamp); pdevinfo->maxamp[ossdev] = imin(pdevinfo->maxamp[ossdev], maxamp); } } /* * Trace signals from/to all possible sources/destionstions to find possible * recording sources, OSS device control ranges and to assign controls. */ static void hdaa_audio_assign_mixers(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w, *cw; int i, j, minamp, maxamp, found; /* Assign mixers to the tree. 
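Every endpoint is visited in turn: DACs, the beeper and recording pins act as signal sources, while ADCs and playback pins act as destinations. The source/destination searches attach each usable amplifier to an OSS mixer device and accumulate the reachable gain range in 1/4dB units (MINQDB/MAXQDB).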
*/ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; minamp = maxamp = 0; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_BEEP_WIDGET || (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && as[w->bindas].dir == HDAA_CTL_IN)) { if (w->ossdev < 0) continue; found = hdaa_audio_ctl_source_amp(devinfo, w->nid, -1, w->ossdev, 1, 0, &minamp, &maxamp); hdaa_adjust_amp(w, w->ossdev, found, minamp, maxamp); } else if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) { found = hdaa_audio_ctl_dest_amp(devinfo, w->nid, -1, SOUND_MIXER_RECLEV, 0, &minamp, &maxamp); hdaa_adjust_amp(w, SOUND_MIXER_RECLEV, found, minamp, maxamp); } else if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && as[w->bindas].dir == HDAA_CTL_OUT) { found = hdaa_audio_ctl_dest_amp(devinfo, w->nid, -1, SOUND_MIXER_VOLUME, 0, &minamp, &maxamp); hdaa_adjust_amp(w, SOUND_MIXER_VOLUME, found, minamp, maxamp); } if (w->ossdev == SOUND_MIXER_IMIX) { minamp = maxamp = 0; found = hdaa_audio_ctl_source_amp(devinfo, w->nid, -1, w->ossdev, 1, 0, &minamp, &maxamp); if (minamp == maxamp) { /* If we are unable to control input monitor as source - try to control it as destination. */ found += hdaa_audio_ctl_dest_amp(devinfo, w->nid, -1, w->ossdev, 0, &minamp, &maxamp); w->pflags |= HDAA_IMIX_AS_DST; } hdaa_adjust_amp(w, w->ossdev, found, minamp, maxamp); } if (w->pflags & HDAA_ADC_MONITOR) { for (j = 0; j < w->nconns; j++) { if (!w->connsenable[j]) continue; cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || cw->enable == 0) continue; if (cw->bindas == -1) continue; if (cw->bindas >= 0 && as[cw->bindas].dir != HDAA_CTL_IN) continue; minamp = maxamp = 0; found = hdaa_audio_ctl_dest_amp(devinfo, w->nid, j, SOUND_MIXER_IGAIN, 0, &minamp, &maxamp); hdaa_adjust_amp(w, SOUND_MIXER_IGAIN, found, minamp, maxamp); } } } } static void hdaa_audio_prepare_pin_ctrl(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w; uint32_t pincap; int i; for (i = 0; i < devinfo->nodecnt; i++) { w = &devinfo->widget[i]; if (w == NULL) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && w->waspin == 0) continue; pincap = w->wclass.pin.cap; /* Disable everything. */ if (devinfo->init_clear) { w->wclass.pin.ctrl &= ~( HDA_CMD_SET_PIN_WIDGET_CTRL_HPHN_ENABLE | HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE | HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE | HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK); } if (w->enable == 0) { /* Pin is unused so left it disabled. */ continue; } else if (w->waspin) { /* Enable input for beeper input. */ w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE; } else if (w->bindas < 0 || as[w->bindas].enable == 0) { /* Pin is unused so left it disabled. */ continue; } else if (as[w->bindas].dir == HDAA_CTL_IN) { /* Input pin, configure for input. 
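Besides enabling the input buffer, pick the highest VREF level (100%, 80% or 50%) that both the quirk flags request and the pin capabilities allow; VREF supplies the bias voltage needed by electret microphones.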
*/ if (HDA_PARAM_PIN_CAP_INPUT_CAP(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE; if ((devinfo->quirks & HDAA_QUIRK_IVREF100) && HDA_PARAM_PIN_CAP_VREF_CTRL_100(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE( HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_100); else if ((devinfo->quirks & HDAA_QUIRK_IVREF80) && HDA_PARAM_PIN_CAP_VREF_CTRL_80(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE( HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_80); else if ((devinfo->quirks & HDAA_QUIRK_IVREF50) && HDA_PARAM_PIN_CAP_VREF_CTRL_50(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE( HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_50); } else { /* Output pin, configure for output. */ if (HDA_PARAM_PIN_CAP_OUTPUT_CAP(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE; if (HDA_PARAM_PIN_CAP_HEADPHONE_CAP(pincap) && (w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK) == HDA_CONFIG_DEFAULTCONF_DEVICE_HP_OUT) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_HPHN_ENABLE; if ((devinfo->quirks & HDAA_QUIRK_OVREF100) && HDA_PARAM_PIN_CAP_VREF_CTRL_100(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE( HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_100); else if ((devinfo->quirks & HDAA_QUIRK_OVREF80) && HDA_PARAM_PIN_CAP_VREF_CTRL_80(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE( HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_80); else if ((devinfo->quirks & HDAA_QUIRK_OVREF50) && HDA_PARAM_PIN_CAP_VREF_CTRL_50(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE( HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_50); } } } static void hdaa_audio_ctl_commit(struct hdaa_devinfo *devinfo) { struct hdaa_audio_ctl *ctl; int i, z; i = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { if (ctl->enable == 0 || ctl->ossmask != 0) { /* Mute disabled and mixer controllable controls. * Last will be initialized by mixer_init(). * This expected to reduce click on startup. */ hdaa_audio_ctl_amp_set(ctl, HDAA_AMP_MUTE_ALL, 0, 0); continue; } /* Init fixed controls to 0dB amplification. 
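In HDA amplifier capabilities the offset field names the step that corresponds to 0dB gain, so programming the value to the offset (clamped to the highest available step) gives unity amplification.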
*/ z = ctl->offset; if (z > ctl->step) z = ctl->step; hdaa_audio_ctl_amp_set(ctl, HDAA_AMP_MUTE_NONE, z, z); } } static void hdaa_gpio_commit(struct hdaa_devinfo *devinfo) { uint32_t gdata, gmask, gdir; int i, numgpio; numgpio = HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap); if (devinfo->gpio != 0 && numgpio != 0) { gdata = hda_command(devinfo->dev, HDA_CMD_GET_GPIO_DATA(0, devinfo->nid)); gmask = hda_command(devinfo->dev, HDA_CMD_GET_GPIO_ENABLE_MASK(0, devinfo->nid)); gdir = hda_command(devinfo->dev, HDA_CMD_GET_GPIO_DIRECTION(0, devinfo->nid)); for (i = 0; i < numgpio; i++) { if ((devinfo->gpio & HDAA_GPIO_MASK(i)) == HDAA_GPIO_SET(i)) { gdata |= (1 << i); gmask |= (1 << i); gdir |= (1 << i); } else if ((devinfo->gpio & HDAA_GPIO_MASK(i)) == HDAA_GPIO_CLEAR(i)) { gdata &= ~(1 << i); gmask |= (1 << i); gdir |= (1 << i); } else if ((devinfo->gpio & HDAA_GPIO_MASK(i)) == HDAA_GPIO_DISABLE(i)) { gmask &= ~(1 << i); } else if ((devinfo->gpio & HDAA_GPIO_MASK(i)) == HDAA_GPIO_INPUT(i)) { gmask |= (1 << i); gdir &= ~(1 << i); } } HDA_BOOTVERBOSE( device_printf(devinfo->dev, "GPIO commit\n"); ); hda_command(devinfo->dev, HDA_CMD_SET_GPIO_ENABLE_MASK(0, devinfo->nid, gmask)); hda_command(devinfo->dev, HDA_CMD_SET_GPIO_DIRECTION(0, devinfo->nid, gdir)); hda_command(devinfo->dev, HDA_CMD_SET_GPIO_DATA(0, devinfo->nid, gdata)); HDA_BOOTVERBOSE( hdaa_dump_gpio(devinfo); ); } } static void hdaa_gpo_commit(struct hdaa_devinfo *devinfo) { uint32_t gdata; int i, numgpo; numgpo = HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap); if (devinfo->gpo != 0 && numgpo != 0) { gdata = hda_command(devinfo->dev, HDA_CMD_GET_GPO_DATA(0, devinfo->nid)); for (i = 0; i < numgpo; i++) { if ((devinfo->gpio & HDAA_GPIO_MASK(i)) == HDAA_GPIO_SET(i)) { gdata |= (1 << i); } else if ((devinfo->gpio & HDAA_GPIO_MASK(i)) == HDAA_GPIO_CLEAR(i)) { gdata &= ~(1 << i); } } HDA_BOOTVERBOSE( device_printf(devinfo->dev, "GPO commit\n"); ); hda_command(devinfo->dev, HDA_CMD_SET_GPO_DATA(0, devinfo->nid, gdata)); HDA_BOOTVERBOSE( hdaa_dump_gpo(devinfo); ); } } static void hdaa_audio_commit(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; int i; /* Commit controls. */ hdaa_audio_ctl_commit(devinfo); /* Commit selectors, pins and EAPD. 
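This pushes the parsed software state into the codec: connection selectors first, then the pin widget control bytes, and finally the EAPD (external amplifier power down) bits, inverted for codecs with the EAPDINV quirk.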
*/ for (i = 0; i < devinfo->nodecnt; i++) { w = &devinfo->widget[i]; if (w == NULL) continue; if (w->selconn == -1) w->selconn = 0; if (w->nconns > 0) hdaa_widget_connection_select(w, w->selconn); if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->waspin) { hda_command(devinfo->dev, HDA_CMD_SET_PIN_WIDGET_CTRL(0, w->nid, w->wclass.pin.ctrl)); } if (w->param.eapdbtl != HDA_INVALID) { uint32_t val; val = w->param.eapdbtl; if (devinfo->quirks & HDAA_QUIRK_EAPDINV) val ^= HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD; hda_command(devinfo->dev, HDA_CMD_SET_EAPD_BTL_ENABLE(0, w->nid, val)); } } hdaa_gpio_commit(devinfo); hdaa_gpo_commit(devinfo); } static void hdaa_powerup(struct hdaa_devinfo *devinfo) { int i; hda_command(devinfo->dev, HDA_CMD_SET_POWER_STATE(0, devinfo->nid, HDA_CMD_POWER_STATE_D0)); DELAY(100); for (i = devinfo->startnode; i < devinfo->endnode; i++) { hda_command(devinfo->dev, HDA_CMD_SET_POWER_STATE(0, i, HDA_CMD_POWER_STATE_D0)); } DELAY(1000); } static int hdaa_pcmchannel_setup(struct hdaa_chan *ch) { struct hdaa_devinfo *devinfo = ch->devinfo; struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w; uint32_t cap, fmtcap, pcmcap; int i, j, ret, channels, onlystereo; uint16_t pinset; ch->caps = hdaa_caps; ch->caps.fmtlist = ch->fmtlist; ch->bit16 = 1; ch->bit32 = 0; ch->pcmrates[0] = 48000; ch->pcmrates[1] = 0; ch->stripecap = 0xff; ret = 0; channels = 0; onlystereo = 1; pinset = 0; fmtcap = devinfo->supp_stream_formats; pcmcap = devinfo->supp_pcm_size_rate; for (i = 0; i < 16; i++) { /* Check that the as is correct */ if (ch->as < 0) break; /* Count only present DACs */ if (as[ch->as].dacs[ch->asindex][i] <= 0) continue; /* Ignore duplicates */ for (j = 0; j < ret; j++) { if (ch->io[j] == as[ch->as].dacs[ch->asindex][i]) break; } if (j < ret) continue; w = hdaa_widget_get(devinfo, as[ch->as].dacs[ch->asindex][i]); if (w == NULL || w->enable == 0) continue; cap = w->param.supp_stream_formats; if (!HDA_PARAM_SUPP_STREAM_FORMATS_PCM(cap) && !HDA_PARAM_SUPP_STREAM_FORMATS_AC3(cap)) continue; /* Many CODECs do not declare AC3 support on SPDIF. I don't believe that they don't support it! */ if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) cap |= HDA_PARAM_SUPP_STREAM_FORMATS_AC3_MASK; if (ret == 0) { fmtcap = cap; pcmcap = w->param.supp_pcm_size_rate; } else { fmtcap &= cap; pcmcap &= w->param.supp_pcm_size_rate; } ch->io[ret++] = as[ch->as].dacs[ch->asindex][i]; ch->stripecap &= w->wclass.conv.stripecap; /* Do not count redirection pin/dac channels. */ if (i == 15 && as[ch->as].hpredir >= 0) continue; channels += HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap) + 1; if (HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap) != 1) onlystereo = 0; pinset |= (1 << i); } ch->io[ret] = -1; ch->channels = channels; if (as[ch->as].fakeredir) ret--; /* The standard speaks only about stereo pins and playback, ... */ if ((!onlystereo) || as[ch->as].mixed) pinset = 0; /* ..., but there it gives us info about speaker layout.
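pinset thus records which sequence slots carry plain stereo pins, apparently so that later code can match channel pairs to standard speaker positions; multichannel converters or mixed-direction associations clear it since the rule does not apply there.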
*/ as[ch->as].pinset = pinset; ch->supp_stream_formats = fmtcap; ch->supp_pcm_size_rate = pcmcap; /* * 8bit = 0 * 16bit = 1 * 20bit = 2 * 24bit = 3 * 32bit = 4 */ if (ret > 0) { i = 0; if (HDA_PARAM_SUPP_STREAM_FORMATS_PCM(fmtcap)) { if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16BIT(pcmcap)) ch->bit16 = 1; else if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8BIT(pcmcap)) ch->bit16 = 0; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_24BIT(pcmcap)) ch->bit32 = 3; else if (HDA_PARAM_SUPP_PCM_SIZE_RATE_20BIT(pcmcap)) ch->bit32 = 2; else if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32BIT(pcmcap)) ch->bit32 = 4; if (!(devinfo->quirks & HDAA_QUIRK_FORCESTEREO)) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 1, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 1, 0); } if (channels >= 2) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 2, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 2, 0); } if (channels >= 3 && !onlystereo) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 3, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 3, 0); ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 3, 1); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 3, 1); } if (channels >= 4) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 4, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 4, 0); if (!onlystereo) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 4, 1); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 4, 1); } } if (channels >= 5 && !onlystereo) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 5, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 5, 0); ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 5, 1); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 5, 1); } if (channels >= 6) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 6, 1); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 6, 1); if (!onlystereo) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 6, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 6, 0); } } if (channels >= 7 && !onlystereo) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 7, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 7, 0); ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 7, 1); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 7, 1); } if (channels >= 8) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 8, 1); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 8, 1); } } if (HDA_PARAM_SUPP_STREAM_FORMATS_AC3(fmtcap)) { ch->fmtlist[i++] = SND_FORMAT(AFMT_AC3, 2, 0); if (channels >= 8) { ch->fmtlist[i++] = SND_FORMAT(AFMT_AC3, 8, 0); ch->fmtlist[i++] = SND_FORMAT(AFMT_AC3, 8, 1); } } ch->fmtlist[i] = 0; i = 0; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8KHZ(pcmcap)) ch->pcmrates[i++] = 8000; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_11KHZ(pcmcap)) ch->pcmrates[i++] = 11025; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16KHZ(pcmcap)) ch->pcmrates[i++] = 16000; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_22KHZ(pcmcap)) ch->pcmrates[i++] = 22050; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32KHZ(pcmcap)) ch->pcmrates[i++] = 32000; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_44KHZ(pcmcap)) ch->pcmrates[i++] = 44100; /* if (HDA_PARAM_SUPP_PCM_SIZE_RATE_48KHZ(pcmcap)) */ ch->pcmrates[i++] = 48000; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_88KHZ(pcmcap)) ch->pcmrates[i++] = 88200; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_96KHZ(pcmcap)) ch->pcmrates[i++] = 96000; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_176KHZ(pcmcap)) ch->pcmrates[i++] = 176400; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_192KHZ(pcmcap)) ch->pcmrates[i++] = 192000; /* if (HDA_PARAM_SUPP_PCM_SIZE_RATE_384KHZ(pcmcap)) */ ch->pcmrates[i] = 0; if (i > 0) { ch->caps.minspeed = 
ch->pcmrates[0]; ch->caps.maxspeed = ch->pcmrates[i - 1]; } } return (ret); } static void hdaa_prepare_pcms(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; int i, j, k, apdev = 0, ardev = 0, dpdev = 0, drdev = 0; for (i = 0; i < devinfo->ascnt; i++) { if (as[i].enable == 0) continue; if (as[i].dir == HDAA_CTL_IN) { if (as[i].digital) drdev++; else ardev++; } else { if (as[i].digital) dpdev++; else apdev++; } } devinfo->num_devs = max(ardev, apdev) + max(drdev, dpdev); devinfo->devs = (struct hdaa_pcm_devinfo *)malloc( devinfo->num_devs * sizeof(struct hdaa_pcm_devinfo), M_HDAA, M_ZERO | M_NOWAIT); if (devinfo->devs == NULL) { device_printf(devinfo->dev, "Unable to allocate memory for devices\n"); return; } for (i = 0; i < devinfo->num_devs; i++) { devinfo->devs[i].index = i; devinfo->devs[i].devinfo = devinfo; devinfo->devs[i].playas = -1; devinfo->devs[i].recas = -1; devinfo->devs[i].digital = 255; } for (i = 0; i < devinfo->ascnt; i++) { if (as[i].enable == 0) continue; for (j = 0; j < devinfo->num_devs; j++) { if (devinfo->devs[j].digital != 255 && (!devinfo->devs[j].digital) != (!as[i].digital)) continue; if (as[i].dir == HDAA_CTL_IN) { if (devinfo->devs[j].recas >= 0) continue; devinfo->devs[j].recas = i; } else { if (devinfo->devs[j].playas >= 0) continue; devinfo->devs[j].playas = i; } as[i].pdevinfo = &devinfo->devs[j]; for (k = 0; k < as[i].num_chans; k++) { devinfo->chans[as[i].chans[k]].pdevinfo = &devinfo->devs[j]; } devinfo->devs[j].digital = as[i].digital; break; } } } static void hdaa_create_pcms(struct hdaa_devinfo *devinfo) { int i; for (i = 0; i < devinfo->num_devs; i++) { struct hdaa_pcm_devinfo *pdevinfo = &devinfo->devs[i]; pdevinfo->dev = device_add_child(devinfo->dev, "pcm", -1); device_set_ivars(pdevinfo->dev, (void *)pdevinfo); } } static void hdaa_dump_ctls(struct hdaa_pcm_devinfo *pdevinfo, const char *banner, uint32_t flag) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_audio_ctl *ctl; char buf[64]; int i, j, printed = 0; if (flag == 0) { flag = ~(SOUND_MASK_VOLUME | SOUND_MASK_PCM | SOUND_MASK_CD | SOUND_MASK_LINE | SOUND_MASK_RECLEV | SOUND_MASK_MIC | SOUND_MASK_SPEAKER | SOUND_MASK_IGAIN | SOUND_MASK_OGAIN | SOUND_MASK_IMIX | SOUND_MASK_MONITOR); } for (j = 0; j < SOUND_MIXER_NRDEVICES; j++) { if ((flag & (1 << j)) == 0) continue; i = 0; printed = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { if (ctl->enable == 0 || ctl->widget->enable == 0) continue; if (!((pdevinfo->playas >= 0 && ctl->widget->bindas == pdevinfo->playas) || (pdevinfo->recas >= 0 && ctl->widget->bindas == pdevinfo->recas) || (ctl->widget->bindas == -2 && pdevinfo->index == 0))) continue; if ((ctl->ossmask & (1 << j)) == 0) continue; if (printed == 0) { if (banner != NULL) { device_printf(pdevinfo->dev, "%s", banner); } else { device_printf(pdevinfo->dev, "Unknown Ctl"); } printf(" (OSS: %s)", hdaa_audio_ctl_ossmixer_mask2allname(1 << j, buf, sizeof(buf))); if (pdevinfo->ossmask & (1 << j)) { printf(": %+d/%+ddB\n", pdevinfo->minamp[j] / 4, pdevinfo->maxamp[j] / 4); } else printf("\n"); printed = 1; } device_printf(pdevinfo->dev, " +- ctl %2d (nid %3d %s", i, ctl->widget->nid, (ctl->ndir == HDAA_CTL_IN)?"in ":"out"); if (ctl->ndir == HDAA_CTL_IN && ctl->ndir == ctl->dir) printf(" %2d): ", ctl->index); else printf("): "); if (ctl->step > 0) { printf("%+d/%+ddB (%d steps)%s\n", MINQDB(ctl) / 4, MAXQDB(ctl) / 4, ctl->step + 1, ctl->mute?" 
+ mute":""); } else printf("%s\n", ctl->mute?"mute":""); } } if (printed) device_printf(pdevinfo->dev, "\n"); } static void hdaa_dump_audio_formats(device_t dev, uint32_t fcap, uint32_t pcmcap) { uint32_t cap; cap = fcap; if (cap != 0) { device_printf(dev, " Stream cap: 0x%08x", cap); if (HDA_PARAM_SUPP_STREAM_FORMATS_AC3(cap)) printf(" AC3"); if (HDA_PARAM_SUPP_STREAM_FORMATS_FLOAT32(cap)) printf(" FLOAT32"); if (HDA_PARAM_SUPP_STREAM_FORMATS_PCM(cap)) printf(" PCM"); printf("\n"); } cap = pcmcap; if (cap != 0) { device_printf(dev, " PCM cap: 0x%08x", cap); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8BIT(cap)) printf(" 8"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16BIT(cap)) printf(" 16"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_20BIT(cap)) printf(" 20"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_24BIT(cap)) printf(" 24"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32BIT(cap)) printf(" 32"); printf(" bits,"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8KHZ(cap)) printf(" 8"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_11KHZ(cap)) printf(" 11"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16KHZ(cap)) printf(" 16"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_22KHZ(cap)) printf(" 22"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32KHZ(cap)) printf(" 32"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_44KHZ(cap)) printf(" 44"); printf(" 48"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_88KHZ(cap)) printf(" 88"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_96KHZ(cap)) printf(" 96"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_176KHZ(cap)) printf(" 176"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_192KHZ(cap)) printf(" 192"); printf(" KHz\n"); } } static void hdaa_dump_pin(struct hdaa_widget *w) { uint32_t pincap; pincap = w->wclass.pin.cap; device_printf(w->devinfo->dev, " Pin cap: 0x%08x", pincap); if (HDA_PARAM_PIN_CAP_IMP_SENSE_CAP(pincap)) printf(" ISC"); if (HDA_PARAM_PIN_CAP_TRIGGER_REQD(pincap)) printf(" TRQD"); if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(pincap)) printf(" PDC"); if (HDA_PARAM_PIN_CAP_HEADPHONE_CAP(pincap)) printf(" HP"); if (HDA_PARAM_PIN_CAP_OUTPUT_CAP(pincap)) printf(" OUT"); if (HDA_PARAM_PIN_CAP_INPUT_CAP(pincap)) printf(" IN"); if (HDA_PARAM_PIN_CAP_BALANCED_IO_PINS(pincap)) printf(" BAL"); if (HDA_PARAM_PIN_CAP_HDMI(pincap)) printf(" HDMI"); if (HDA_PARAM_PIN_CAP_VREF_CTRL(pincap)) { printf(" VREF["); if (HDA_PARAM_PIN_CAP_VREF_CTRL_50(pincap)) printf(" 50"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_80(pincap)) printf(" 80"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_100(pincap)) printf(" 100"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_GROUND(pincap)) printf(" GROUND"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_HIZ(pincap)) printf(" HIZ"); printf(" ]"); } if (HDA_PARAM_PIN_CAP_EAPD_CAP(pincap)) printf(" EAPD"); if (HDA_PARAM_PIN_CAP_DP(pincap)) printf(" DP"); if (HDA_PARAM_PIN_CAP_HBR(pincap)) printf(" HBR"); printf("\n"); device_printf(w->devinfo->dev, " Pin config: 0x%08x\n", w->wclass.pin.config); device_printf(w->devinfo->dev, " Pin control: 0x%08x", w->wclass.pin.ctrl); if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_HPHN_ENABLE) printf(" HP"); if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE) printf(" IN"); if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE) printf(" OUT"); if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) { if ((w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) == 0x03) printf(" HBR"); else if ((w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) != 0) printf(" EPTs"); } else { if ((w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) != 0) printf(" VREFs"); } printf("\n"); } static void hdaa_dump_pin_config(struct hdaa_widget *w, uint32_t 
conf) { device_printf(w->devinfo->dev, "%2d %08x %-2d %-2d " "%-13s %-5s %-7s %-10s %-7s %d%s\n", w->nid, conf, HDA_CONFIG_DEFAULTCONF_ASSOCIATION(conf), HDA_CONFIG_DEFAULTCONF_SEQUENCE(conf), HDA_DEVS[HDA_CONFIG_DEFAULTCONF_DEVICE(conf)], HDA_CONNS[HDA_CONFIG_DEFAULTCONF_CONNECTIVITY(conf)], HDA_CONNECTORS[HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE(conf)], HDA_LOCS[HDA_CONFIG_DEFAULTCONF_LOCATION(conf)], HDA_COLORS[HDA_CONFIG_DEFAULTCONF_COLOR(conf)], HDA_CONFIG_DEFAULTCONF_MISC(conf), (w->enable == 0)?" DISA":""); } static void hdaa_dump_pin_configs(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; int i; device_printf(devinfo->dev, "nid 0x as seq " "device conn jack loc color misc\n"); for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; hdaa_dump_pin_config(w, w->wclass.pin.config); } } static void hdaa_dump_amp(device_t dev, uint32_t cap, const char *banner) { int offset, size, step; offset = HDA_PARAM_OUTPUT_AMP_CAP_OFFSET(cap); size = HDA_PARAM_OUTPUT_AMP_CAP_STEPSIZE(cap); step = HDA_PARAM_OUTPUT_AMP_CAP_NUMSTEPS(cap); device_printf(dev, " %s amp: 0x%08x " "mute=%d step=%d size=%d offset=%d (%+d/%+ddB)\n", banner, cap, HDA_PARAM_OUTPUT_AMP_CAP_MUTE_CAP(cap), step, size, offset, ((0 - offset) * (size + 1)) / 4, ((step - offset) * (size + 1)) / 4); } static void hdaa_dump_nodes(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w, *cw; char buf[64]; int i, j; device_printf(devinfo->dev, "\n"); device_printf(devinfo->dev, "Default parameters:\n"); hdaa_dump_audio_formats(devinfo->dev, devinfo->supp_stream_formats, devinfo->supp_pcm_size_rate); hdaa_dump_amp(devinfo->dev, devinfo->inamp_cap, " Input"); hdaa_dump_amp(devinfo->dev, devinfo->outamp_cap, "Output"); for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL) { device_printf(devinfo->dev, "Ghost widget nid=%d\n", i); continue; } device_printf(devinfo->dev, "\n"); device_printf(devinfo->dev, " nid: %d%s\n", w->nid, (w->enable == 0) ? 
" [DISABLED]" : ""); device_printf(devinfo->dev, " Name: %s\n", w->name); device_printf(devinfo->dev, " Widget cap: 0x%08x", w->param.widget_cap); if (w->param.widget_cap & 0x0ee1) { if (HDA_PARAM_AUDIO_WIDGET_CAP_LR_SWAP(w->param.widget_cap)) printf(" LRSWAP"); if (HDA_PARAM_AUDIO_WIDGET_CAP_POWER_CTRL(w->param.widget_cap)) printf(" PWR"); if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) printf(" DIGITAL"); if (HDA_PARAM_AUDIO_WIDGET_CAP_UNSOL_CAP(w->param.widget_cap)) printf(" UNSOL"); if (HDA_PARAM_AUDIO_WIDGET_CAP_PROC_WIDGET(w->param.widget_cap)) printf(" PROC"); if (HDA_PARAM_AUDIO_WIDGET_CAP_STRIPE(w->param.widget_cap)) printf(" STRIPE(x%d)", 1 << (fls(w->wclass.conv.stripecap) - 1)); j = HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap); if (j == 1) printf(" STEREO"); else if (j > 1) printf(" %dCH", j + 1); } printf("\n"); if (w->bindas != -1) { device_printf(devinfo->dev, " Association: %d (0x%04x)\n", w->bindas, w->bindseqmask); } if (w->ossmask != 0 || w->ossdev >= 0) { device_printf(devinfo->dev, " OSS: %s", hdaa_audio_ctl_ossmixer_mask2allname(w->ossmask, buf, sizeof(buf))); if (w->ossdev >= 0) printf(" (%s)", ossnames[w->ossdev]); printf("\n"); } if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) { hdaa_dump_audio_formats(devinfo->dev, w->param.supp_stream_formats, w->param.supp_pcm_size_rate); } else if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->waspin) hdaa_dump_pin(w); if (w->param.eapdbtl != HDA_INVALID) device_printf(devinfo->dev, " EAPD: 0x%08x\n", w->param.eapdbtl); if (HDA_PARAM_AUDIO_WIDGET_CAP_OUT_AMP(w->param.widget_cap) && w->param.outamp_cap != 0) hdaa_dump_amp(devinfo->dev, w->param.outamp_cap, "Output"); if (HDA_PARAM_AUDIO_WIDGET_CAP_IN_AMP(w->param.widget_cap) && w->param.inamp_cap != 0) hdaa_dump_amp(devinfo->dev, w->param.inamp_cap, " Input"); if (w->nconns > 0) device_printf(devinfo->dev, " Connections: %d\n", w->nconns); for (j = 0; j < w->nconns; j++) { cw = hdaa_widget_get(devinfo, w->conns[j]); device_printf(devinfo->dev, " + %s<- nid=%d [%s]", (w->connsenable[j] == 0)?"[DISABLED] ":"", w->conns[j], (cw == NULL) ? "GHOST!" 
: cw->name); if (cw == NULL) printf(" [UNKNOWN]"); else if (cw->enable == 0) printf(" [DISABLED]"); if (w->nconns > 1 && w->selconn == j && w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) printf(" (selected)"); printf("\n"); } } } static void hdaa_dump_dst_nid(struct hdaa_pcm_devinfo *pdevinfo, nid_t nid, int depth) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w, *cw; char buf[64]; int i; if (depth > HDA_PARSE_MAXDEPTH) return; w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return; if (depth == 0) device_printf(pdevinfo->dev, "%*s", 4, ""); else device_printf(pdevinfo->dev, "%*s + <- ", 4 + (depth - 1) * 7, ""); printf("nid=%d [%s]", w->nid, w->name); if (depth > 0) { if (w->ossmask == 0) { printf("\n"); return; } printf(" [src: %s]", hdaa_audio_ctl_ossmixer_mask2allname( w->ossmask, buf, sizeof(buf))); if (w->ossdev >= 0) { printf("\n"); return; } } printf("\n"); for (i = 0; i < w->nconns; i++) { if (w->connsenable[i] == 0) continue; cw = hdaa_widget_get(devinfo, w->conns[i]); if (cw == NULL || cw->enable == 0 || cw->bindas == -1) continue; hdaa_dump_dst_nid(pdevinfo, w->conns[i], depth + 1); } } static void hdaa_dump_dac(struct hdaa_pcm_devinfo *pdevinfo) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_audio_as *as; struct hdaa_widget *w; nid_t *nids; int chid, i; if (pdevinfo->playas < 0) return; device_printf(pdevinfo->dev, "Playback:\n"); chid = devinfo->as[pdevinfo->playas].chans[0]; hdaa_dump_audio_formats(pdevinfo->dev, devinfo->chans[chid].supp_stream_formats, devinfo->chans[chid].supp_pcm_size_rate); for (i = 0; i < devinfo->as[pdevinfo->playas].num_chans; i++) { chid = devinfo->as[pdevinfo->playas].chans[i]; device_printf(pdevinfo->dev, " DAC:"); for (nids = devinfo->chans[chid].io; *nids != -1; nids++) printf(" %d", *nids); printf("\n"); } as = &devinfo->as[pdevinfo->playas]; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; w = hdaa_widget_get(devinfo, as->pins[i]); if (w == NULL || w->enable == 0) continue; device_printf(pdevinfo->dev, "\n"); hdaa_dump_dst_nid(pdevinfo, as->pins[i], 0); } device_printf(pdevinfo->dev, "\n"); } static void hdaa_dump_adc(struct hdaa_pcm_devinfo *pdevinfo) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w; nid_t *nids; int chid, i; if (pdevinfo->recas < 0) return; device_printf(pdevinfo->dev, "Record:\n"); chid = devinfo->as[pdevinfo->recas].chans[0]; hdaa_dump_audio_formats(pdevinfo->dev, devinfo->chans[chid].supp_stream_formats, devinfo->chans[chid].supp_pcm_size_rate); for (i = 0; i < devinfo->as[pdevinfo->recas].num_chans; i++) { chid = devinfo->as[pdevinfo->recas].chans[i]; device_printf(pdevinfo->dev, " ADC:"); for (nids = devinfo->chans[chid].io; *nids != -1; nids++) printf(" %d", *nids); printf("\n"); } for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) continue; if (w->bindas != pdevinfo->recas) continue; device_printf(pdevinfo->dev, "\n"); hdaa_dump_dst_nid(pdevinfo, i, 0); } device_printf(pdevinfo->dev, "\n"); } static void hdaa_dump_mix(struct hdaa_pcm_devinfo *pdevinfo) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w; int i; int printed = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->ossdev != SOUND_MIXER_IMIX) continue; if (w->bindas != pdevinfo->recas) continue; if 
(printed == 0) { printed = 1; device_printf(pdevinfo->dev, "Input Mix:\n"); } device_printf(pdevinfo->dev, "\n"); hdaa_dump_dst_nid(pdevinfo, i, 0); } if (printed) device_printf(pdevinfo->dev, "\n"); } static void hdaa_pindump(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); struct hdaa_widget *w; uint32_t res, pincap, delay; int i; device_printf(dev, "Dumping AFG pins:\n"); device_printf(dev, "nid 0x as seq " "device conn jack loc color misc\n"); for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; hdaa_dump_pin_config(w, w->wclass.pin.config); pincap = w->wclass.pin.cap; device_printf(dev, " Caps: %2s %3s %2s %4s %4s", HDA_PARAM_PIN_CAP_INPUT_CAP(pincap)?"IN":"", HDA_PARAM_PIN_CAP_OUTPUT_CAP(pincap)?"OUT":"", HDA_PARAM_PIN_CAP_HEADPHONE_CAP(pincap)?"HP":"", HDA_PARAM_PIN_CAP_EAPD_CAP(pincap)?"EAPD":"", HDA_PARAM_PIN_CAP_VREF_CTRL(pincap)?"VREF":""); if (HDA_PARAM_PIN_CAP_IMP_SENSE_CAP(pincap) || HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(pincap)) { if (HDA_PARAM_PIN_CAP_TRIGGER_REQD(pincap)) { delay = 0; hda_command(dev, HDA_CMD_SET_PIN_SENSE(0, w->nid, 0)); do { res = hda_command(dev, HDA_CMD_GET_PIN_SENSE(0, w->nid)); if (res != 0x7fffffff && res != 0xffffffff) break; DELAY(10); } while (++delay < 10000); } else { delay = 0; res = hda_command(dev, HDA_CMD_GET_PIN_SENSE(0, w->nid)); } printf(" Sense: 0x%08x (%sconnected%s)", res, (res & HDA_CMD_GET_PIN_SENSE_PRESENCE_DETECT) ? "" : "dis", (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap) && (res & HDA_CMD_GET_PIN_SENSE_ELD_VALID)) ? ", ELD valid" : ""); if (delay > 0) printf(" delay %dus", delay * 10); } printf("\n"); } device_printf(dev, "NumGPIO=%d NumGPO=%d NumGPI=%d GPIWake=%d GPIUnsol=%d\n", HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_GPI_WAKE(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_GPI_UNSOL(devinfo->gpio_cap)); hdaa_dump_gpi(devinfo); hdaa_dump_gpio(devinfo); hdaa_dump_gpo(devinfo); } static void hdaa_configure(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); struct hdaa_audio_ctl *ctl; int i; HDA_BOOTHVERBOSE( device_printf(dev, "Applying built-in patches...\n"); ); hdaa_patch(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Applying local patches...\n"); ); hdaa_local_patch(devinfo); hdaa_audio_postprocess(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Parsing Ctls...\n"); ); hdaa_audio_ctl_parse(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling nonaudio...\n"); ); hdaa_audio_disable_nonaudio(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling useless...\n"); ); hdaa_audio_disable_useless(devinfo); HDA_BOOTVERBOSE( device_printf(dev, "Patched pins configuration:\n"); hdaa_dump_pin_configs(devinfo); ); HDA_BOOTHVERBOSE( device_printf(dev, "Parsing pin associations...\n"); ); hdaa_audio_as_parse(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Building AFG tree...\n"); ); hdaa_audio_build_tree(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling unassociated " "widgets...\n"); ); hdaa_audio_disable_unas(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling nonselected " "inputs...\n"); ); hdaa_audio_disable_notselected(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling useless...\n"); ); hdaa_audio_disable_useless(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling " "crossassociatement connections...\n"); ); 
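The pin-sense probe in hdaa_pindump() above must first trigger a measurement on pins that advertise TRIGGER_REQD, then poll until the codec returns a valid result. A minimal sketch of that trigger-then-poll idiom, isolated from the dump logic (my_pin_sense is a hypothetical helper; the HDA_CMD_* macros, hda_command() and DELAY() are the ones used above):

static uint32_t
my_pin_sense(device_t dev, nid_t nid)
{
	uint32_t res;
	int delay;

	/* Kick off the measurement, then poll for up to 100ms. */
	hda_command(dev, HDA_CMD_SET_PIN_SENSE(0, nid, 0));
	for (delay = 0; delay < 10000; delay++) {
		res = hda_command(dev, HDA_CMD_GET_PIN_SENSE(0, nid));
		if (res != 0x7fffffff && res != 0xffffffff)
			break;		/* valid response received */
		DELAY(10);		/* wait 10us before retrying */
	}
	return (res);
}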
hdaa_audio_disable_crossas(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling useless...\n"); ); hdaa_audio_disable_useless(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Binding associations to channels...\n"); ); hdaa_audio_bind_as(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Assigning names to signal sources...\n"); ); hdaa_audio_assign_names(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Preparing PCM devices...\n"); ); hdaa_prepare_pcms(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Assigning mixers to the tree...\n"); ); hdaa_audio_assign_mixers(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Preparing pin controls...\n"); ); hdaa_audio_prepare_pin_ctrl(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "AFG commit...\n"); ); hdaa_audio_commit(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Applying direct built-in patches...\n"); ); hdaa_patch_direct(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Pin sense init...\n"); ); hdaa_sense_init(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Creating PCM devices...\n"); ); hdaa_create_pcms(devinfo); HDA_BOOTVERBOSE( if (devinfo->quirks != 0) { device_printf(dev, "FG config/quirks:"); for (i = 0; i < nitems(hdaa_quirks_tab); i++) { if ((devinfo->quirks & hdaa_quirks_tab[i].value) == hdaa_quirks_tab[i].value) printf(" %s", hdaa_quirks_tab[i].key); } printf("\n"); } ); HDA_BOOTHVERBOSE( device_printf(dev, "\n"); device_printf(dev, "+-----------+\n"); device_printf(dev, "| HDA NODES |\n"); device_printf(dev, "+-----------+\n"); hdaa_dump_nodes(devinfo); device_printf(dev, "\n"); device_printf(dev, "+----------------+\n"); device_printf(dev, "| HDA AMPLIFIERS |\n"); device_printf(dev, "+----------------+\n"); device_printf(dev, "\n"); i = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { device_printf(dev, "%3d: nid %3d %s (%s) index %d", i, (ctl->widget != NULL) ? ctl->widget->nid : -1, (ctl->ndir == HDAA_CTL_IN)?"in ":"out", (ctl->dir == HDAA_CTL_IN)?"in ":"out", ctl->index); if (ctl->childwidget != NULL) printf(" cnid %3d", ctl->childwidget->nid); else printf(" "); printf(" ossmask=0x%08x\n", ctl->ossmask); device_printf(dev, " mute: %d step: %3d size: %3d off: %3d%s\n", ctl->mute, ctl->step, ctl->size, ctl->offset, (ctl->enable == 0) ? " [DISABLED]" : ((ctl->ossmask == 0) ? 
" [UNUSED]" : "")); } device_printf(dev, "\n"); ); } static void hdaa_unconfigure(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); struct hdaa_widget *w; int i, j; HDA_BOOTHVERBOSE( device_printf(dev, "Pin sense deinit...\n"); ); hdaa_sense_deinit(devinfo); free(devinfo->ctl, M_HDAA); devinfo->ctl = NULL; devinfo->ctlcnt = 0; free(devinfo->as, M_HDAA); devinfo->as = NULL; devinfo->ascnt = 0; free(devinfo->devs, M_HDAA); devinfo->devs = NULL; devinfo->num_devs = 0; free(devinfo->chans, M_HDAA); devinfo->chans = NULL; devinfo->num_chans = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL) continue; w->enable = 1; w->selconn = -1; w->pflags = 0; w->bindas = -1; w->bindseqmask = 0; w->ossdev = -1; w->ossmask = 0; for (j = 0; j < w->nconns; j++) w->connsenable[j] = 1; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) w->wclass.pin.config = w->wclass.pin.newconf; if (w->eld != NULL) { w->eld_len = 0; free(w->eld, M_HDAA); w->eld = NULL; } } } static int hdaa_sysctl_gpi_state(SYSCTL_HANDLER_ARGS) { struct hdaa_devinfo *devinfo = oidp->oid_arg1; device_t dev = devinfo->dev; char buf[256]; int n = 0, i, numgpi; uint32_t data = 0; buf[0] = 0; hdaa_lock(devinfo); numgpi = HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap); if (numgpi > 0) { data = hda_command(dev, HDA_CMD_GET_GPI_DATA(0, devinfo->nid)); } hdaa_unlock(devinfo); for (i = 0; i < numgpi; i++) { n += snprintf(buf + n, sizeof(buf) - n, "%s%d=%d", n != 0 ? " " : "", i, ((data >> i) & 1)); } return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); } static int hdaa_sysctl_gpio_state(SYSCTL_HANDLER_ARGS) { struct hdaa_devinfo *devinfo = oidp->oid_arg1; device_t dev = devinfo->dev; char buf[256]; int n = 0, i, numgpio; uint32_t data = 0, enable = 0, dir = 0; buf[0] = 0; hdaa_lock(devinfo); numgpio = HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap); if (numgpio > 0) { data = hda_command(dev, HDA_CMD_GET_GPIO_DATA(0, devinfo->nid)); enable = hda_command(dev, HDA_CMD_GET_GPIO_ENABLE_MASK(0, devinfo->nid)); dir = hda_command(dev, HDA_CMD_GET_GPIO_DIRECTION(0, devinfo->nid)); } hdaa_unlock(devinfo); for (i = 0; i < numgpio; i++) { n += snprintf(buf + n, sizeof(buf) - n, "%s%d=", n != 0 ? " " : "", i); if ((enable & (1 << i)) == 0) { n += snprintf(buf + n, sizeof(buf) - n, "disabled"); continue; } n += snprintf(buf + n, sizeof(buf) - n, "%sput(%d)", ((dir >> i) & 1) ? "out" : "in", ((data >> i) & 1)); } return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); } static int hdaa_sysctl_gpio_config(SYSCTL_HANDLER_ARGS) { struct hdaa_devinfo *devinfo = oidp->oid_arg1; char buf[256]; int error, n = 0, i, numgpio; uint32_t gpio, x; gpio = devinfo->newgpio; numgpio = HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap); buf[0] = 0; for (i = 0; i < numgpio; i++) { x = (gpio & HDAA_GPIO_MASK(i)) >> HDAA_GPIO_SHIFT(i); n += snprintf(buf + n, sizeof(buf) - n, "%s%d=%s", n != 0 ? 
" " : "", i, HDA_GPIO_ACTIONS[x]); } error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); if (strncmp(buf, "0x", 2) == 0) gpio = strtol(buf + 2, NULL, 16); else gpio = hdaa_gpio_patch(gpio, buf); hdaa_lock(devinfo); devinfo->newgpio = devinfo->gpio = gpio; hdaa_gpio_commit(devinfo); hdaa_unlock(devinfo); return (0); } static int hdaa_sysctl_gpo_state(SYSCTL_HANDLER_ARGS) { struct hdaa_devinfo *devinfo = oidp->oid_arg1; device_t dev = devinfo->dev; char buf[256]; int n = 0, i, numgpo; uint32_t data = 0; buf[0] = 0; hdaa_lock(devinfo); numgpo = HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap); if (numgpo > 0) { data = hda_command(dev, HDA_CMD_GET_GPO_DATA(0, devinfo->nid)); } hdaa_unlock(devinfo); for (i = 0; i < numgpo; i++) { n += snprintf(buf + n, sizeof(buf) - n, "%s%d=%d", n != 0 ? " " : "", i, ((data >> i) & 1)); } return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); } static int hdaa_sysctl_gpo_config(SYSCTL_HANDLER_ARGS) { struct hdaa_devinfo *devinfo = oidp->oid_arg1; char buf[256]; int error, n = 0, i, numgpo; uint32_t gpo, x; gpo = devinfo->newgpo; numgpo = HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap); buf[0] = 0; for (i = 0; i < numgpo; i++) { x = (gpo & HDAA_GPIO_MASK(i)) >> HDAA_GPIO_SHIFT(i); n += snprintf(buf + n, sizeof(buf) - n, "%s%d=%s", n != 0 ? " " : "", i, HDA_GPIO_ACTIONS[x]); } error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); if (strncmp(buf, "0x", 2) == 0) gpo = strtol(buf + 2, NULL, 16); else gpo = hdaa_gpio_patch(gpo, buf); hdaa_lock(devinfo); devinfo->newgpo = devinfo->gpo = gpo; hdaa_gpo_commit(devinfo); hdaa_unlock(devinfo); return (0); } static int hdaa_sysctl_reconfig(SYSCTL_HANDLER_ARGS) { device_t dev; struct hdaa_devinfo *devinfo; int error, val; dev = oidp->oid_arg1; devinfo = device_get_softc(dev); if (devinfo == NULL) return (EINVAL); val = 0; error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL || val == 0) return (error); HDA_BOOTHVERBOSE( device_printf(dev, "Reconfiguration...\n"); ); if ((error = device_delete_children(dev)) != 0) return (error); hdaa_lock(devinfo); hdaa_unconfigure(dev); hdaa_configure(dev); hdaa_unlock(devinfo); bus_generic_attach(dev); HDA_BOOTHVERBOSE( device_printf(dev, "Reconfiguration done\n"); ); return (0); } static int hdaa_suspend(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); int i; HDA_BOOTHVERBOSE( device_printf(dev, "Suspend...\n"); ); hdaa_lock(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Stop streams...\n"); ); for (i = 0; i < devinfo->num_chans; i++) { if (devinfo->chans[i].flags & HDAA_CHN_RUNNING) { devinfo->chans[i].flags |= HDAA_CHN_SUSPEND; hdaa_channel_stop(&devinfo->chans[i]); } } HDA_BOOTHVERBOSE( device_printf(dev, "Power down FG" " nid=%d to the D3 state...\n", devinfo->nid); ); hda_command(devinfo->dev, HDA_CMD_SET_POWER_STATE(0, devinfo->nid, HDA_CMD_POWER_STATE_D3)); callout_stop(&devinfo->poll_jack); hdaa_unlock(devinfo); callout_drain(&devinfo->poll_jack); HDA_BOOTHVERBOSE( device_printf(dev, "Suspend done\n"); ); return (0); } static int hdaa_resume(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); int i; HDA_BOOTHVERBOSE( device_printf(dev, "Resume...\n"); ); hdaa_lock(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Power up audio FG nid=%d...\n", devinfo->nid); ); hdaa_powerup(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "AFG commit...\n"); ); hdaa_audio_commit(devinfo); 
HDA_BOOTHVERBOSE( device_printf(dev, "Applying direct built-in patches...\n"); ); hdaa_patch_direct(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Pin sense init...\n"); ); hdaa_sense_init(devinfo); hdaa_unlock(devinfo); for (i = 0; i < devinfo->num_devs; i++) { struct hdaa_pcm_devinfo *pdevinfo = &devinfo->devs[i]; HDA_BOOTHVERBOSE( device_printf(pdevinfo->dev, "OSS mixer reinitialization...\n"); ); if (mixer_reinit(pdevinfo->dev) == -1) device_printf(pdevinfo->dev, "unable to reinitialize the mixer\n"); } hdaa_lock(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Start streams...\n"); ); for (i = 0; i < devinfo->num_chans; i++) { if (devinfo->chans[i].flags & HDAA_CHN_SUSPEND) { devinfo->chans[i].flags &= ~HDAA_CHN_SUSPEND; hdaa_channel_start(&devinfo->chans[i]); } } hdaa_unlock(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Resume done\n"); ); return (0); } static int hdaa_probe(device_t dev) { const char *pdesc; char buf[128]; if (hda_get_node_type(dev) != HDA_PARAM_FCT_GRP_TYPE_NODE_TYPE_AUDIO) return (ENXIO); pdesc = device_get_desc(device_get_parent(dev)); snprintf(buf, sizeof(buf), "%.*s Audio Function Group", (int)(strlen(pdesc) - 10), pdesc); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } static int hdaa_attach(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); uint32_t res; nid_t nid = hda_get_node_id(dev); devinfo->dev = dev; devinfo->lock = HDAC_GET_MTX(device_get_parent(dev), dev); devinfo->nid = nid; devinfo->newquirks = -1; devinfo->newgpio = -1; devinfo->newgpo = -1; callout_init(&devinfo->poll_jack, 1); devinfo->poll_ival = hz; hdaa_lock(devinfo); res = hda_command(dev, HDA_CMD_GET_PARAMETER(0 , nid, HDA_PARAM_SUB_NODE_COUNT)); hdaa_unlock(devinfo); devinfo->nodecnt = HDA_PARAM_SUB_NODE_COUNT_TOTAL(res); devinfo->startnode = HDA_PARAM_SUB_NODE_COUNT_START(res); devinfo->endnode = devinfo->startnode + devinfo->nodecnt; HDA_BOOTVERBOSE( device_printf(dev, "Subsystem ID: 0x%08x\n", hda_get_subsystem_id(dev)); ); HDA_BOOTHVERBOSE( device_printf(dev, "Audio Function Group at nid=%d: %d subnodes %d-%d\n", nid, devinfo->nodecnt, devinfo->startnode, devinfo->endnode - 1); ); if (devinfo->nodecnt > 0) devinfo->widget = (struct hdaa_widget *)malloc( sizeof(*(devinfo->widget)) * devinfo->nodecnt, M_HDAA, M_WAITOK | M_ZERO); else devinfo->widget = NULL; hdaa_lock(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Powering up...\n"); ); hdaa_powerup(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Parsing audio FG...\n"); ); hdaa_audio_parse(devinfo); HDA_BOOTVERBOSE( device_printf(dev, "Original pins configuration:\n"); hdaa_dump_pin_configs(devinfo); ); hdaa_configure(dev); hdaa_unlock(devinfo); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "config", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &devinfo->newquirks, 0, hdaa_sysctl_quirks, "A", "Configuration options"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "gpi_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, devinfo, 0, hdaa_sysctl_gpi_state, "A", "GPI state"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "gpio_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, devinfo, 0, hdaa_sysctl_gpio_state, "A", "GPIO state"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "gpio_config", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, devinfo, 0, hdaa_sysctl_gpio_config, "A", "GPIO 
configuration"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "gpo_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, devinfo, 0, hdaa_sysctl_gpo_state, "A", "GPO state"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "gpo_config", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, devinfo, 0, hdaa_sysctl_gpo_config, "A", "GPO configuration"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "reconfig", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, dev, 0, hdaa_sysctl_reconfig, "I", "Reprocess configuration"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "init_clear", CTLFLAG_RW, &devinfo->init_clear, 1,"Clear initial pin widget configuration"); bus_generic_attach(dev); return (0); } static int hdaa_detach(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); int error; if ((error = device_delete_children(dev)) != 0) return (error); hdaa_lock(devinfo); hdaa_unconfigure(dev); devinfo->poll_ival = 0; callout_stop(&devinfo->poll_jack); hdaa_unlock(devinfo); callout_drain(&devinfo->poll_jack); free(devinfo->widget, M_HDAA); return (0); } static int hdaa_print_child(device_t dev, device_t child) { struct hdaa_devinfo *devinfo = device_get_softc(dev); struct hdaa_pcm_devinfo *pdevinfo = (struct hdaa_pcm_devinfo *)device_get_ivars(child); struct hdaa_audio_as *as; int retval, first = 1, i; retval = bus_print_child_header(dev, child); retval += printf(" at nid "); if (pdevinfo->playas >= 0) { as = &devinfo->as[pdevinfo->playas]; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; retval += printf("%s%d", first ? "" : ",", as->pins[i]); first = 0; } } if (pdevinfo->recas >= 0) { if (pdevinfo->playas >= 0) { retval += printf(" and "); first = 1; } as = &devinfo->as[pdevinfo->recas]; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; retval += printf("%s%d", first ? "" : ",", as->pins[i]); first = 0; } } retval += bus_print_child_footer(dev, child); return (retval); } static int -hdaa_child_location_str(device_t dev, device_t child, char *buf, - size_t buflen) +hdaa_child_location(device_t dev, device_t child, struct sbuf *sb) { struct hdaa_devinfo *devinfo = device_get_softc(dev); struct hdaa_pcm_devinfo *pdevinfo = (struct hdaa_pcm_devinfo *)device_get_ivars(child); struct hdaa_audio_as *as; - int first = 1, i, len = 0; + int first = 1, i; - len += snprintf(buf + len, buflen - len, "nid="); + sbuf_printf(sb, "nid="); if (pdevinfo->playas >= 0) { as = &devinfo->as[pdevinfo->playas]; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; - len += snprintf(buf + len, buflen - len, - "%s%d", first ? "" : ",", as->pins[i]); + sbuf_printf(sb, "%s%d", first ? "" : ",", as->pins[i]); first = 0; } } if (pdevinfo->recas >= 0) { as = &devinfo->as[pdevinfo->recas]; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; - len += snprintf(buf + len, buflen - len, - "%s%d", first ? "" : ",", as->pins[i]); + sbuf_printf(sb, "%s%d", first ? "" : ",", as->pins[i]); first = 0; } } return (0); } static void hdaa_stream_intr(device_t dev, int dir, int stream) { struct hdaa_devinfo *devinfo = device_get_softc(dev); struct hdaa_chan *ch; int i; for (i = 0; i < devinfo->num_chans; i++) { ch = &devinfo->chans[i]; if (!(ch->flags & HDAA_CHN_RUNNING)) continue; if (ch->dir == ((dir == 1) ? 
		    PCMDIR_PLAY : PCMDIR_REC) && ch->sid == stream) {
			hdaa_unlock(devinfo);
			chn_intr(ch->c);
			hdaa_lock(devinfo);
		}
	}
}

static void
hdaa_unsol_intr(device_t dev, uint32_t resp)
{
	struct hdaa_devinfo *devinfo = device_get_softc(dev);
	struct hdaa_widget *w;
	int i, tag, flags;

	HDA_BOOTHVERBOSE(
		device_printf(dev, "Unsolicited response %08x\n", resp);
	);
	tag = resp >> 26;
	for (i = devinfo->startnode; i < devinfo->endnode; i++) {
		w = hdaa_widget_get(devinfo, i);
		if (w == NULL || w->enable == 0 ||
		    w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
			continue;
		if (w->unsol != tag)
			continue;
		if (HDA_PARAM_PIN_CAP_DP(w->wclass.pin.cap) ||
		    HDA_PARAM_PIN_CAP_HDMI(w->wclass.pin.cap))
			flags = resp & 0x03;
		else
			flags = 0x01;
		if (flags & 0x01)
			hdaa_presence_handler(w);
		if (flags & 0x02)
			hdaa_eld_handler(w);
	}
}

static device_method_t hdaa_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe,		hdaa_probe),
	DEVMETHOD(device_attach,	hdaa_attach),
	DEVMETHOD(device_detach,	hdaa_detach),
	DEVMETHOD(device_suspend,	hdaa_suspend),
	DEVMETHOD(device_resume,	hdaa_resume),
	/* Bus interface */
	DEVMETHOD(bus_print_child,	hdaa_print_child),
-	DEVMETHOD(bus_child_location_str, hdaa_child_location_str),
+	DEVMETHOD(bus_child_location,	hdaa_child_location),
	DEVMETHOD(hdac_stream_intr,	hdaa_stream_intr),
	DEVMETHOD(hdac_unsol_intr,	hdaa_unsol_intr),
	DEVMETHOD(hdac_pindump,		hdaa_pindump),
	DEVMETHOD_END
};

static driver_t hdaa_driver = {
	"hdaa",
	hdaa_methods,
	sizeof(struct hdaa_devinfo),
};

static devclass_t hdaa_devclass;

DRIVER_MODULE(snd_hda, hdacc, hdaa_driver, hdaa_devclass, NULL, NULL);

static void
hdaa_chan_formula(struct hdaa_devinfo *devinfo, int asid, char *buf,
    int buflen)
{
	struct hdaa_audio_as *as;
	int c;

	as = &devinfo->as[asid];
	c = devinfo->chans[as->chans[0]].channels;
	if (c == 1)
		snprintf(buf, buflen, "mono");
	else if (c == 2) {
		if (as->hpredir < 0)
			buf[0] = 0;
		else
			snprintf(buf, buflen, "2.0");
	} else if (as->pinset == 0x0003)
		snprintf(buf, buflen, "3.1");
	else if (as->pinset == 0x0005 || as->pinset == 0x0011)
		snprintf(buf, buflen, "4.0");
	else if (as->pinset == 0x0007 || as->pinset == 0x0013)
		snprintf(buf, buflen, "5.1");
	else if (as->pinset == 0x0017)
		snprintf(buf, buflen, "7.1");
	else
		snprintf(buf, buflen, "%dch", c);
	if (as->hpredir >= 0)
		strlcat(buf, "+HP", buflen);
}

static int
hdaa_chan_type(struct hdaa_devinfo *devinfo, int asid)
{
	struct hdaa_audio_as *as;
	struct hdaa_widget *w;
	int i, t = -1, t1;

	as = &devinfo->as[asid];
	for (i = 0; i < 16; i++) {
		w = hdaa_widget_get(devinfo, as->pins[i]);
		if (w == NULL || w->enable == 0 ||
		    w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
			continue;
		t1 = HDA_CONFIG_DEFAULTCONF_DEVICE(w->wclass.pin.config);
		if (t == -1)
			t = t1;
		else if (t != t1) {
			t = -2;
			break;
		}
	}
	return (t);
}

static int
hdaa_sysctl_32bit(SYSCTL_HANDLER_ARGS)
{
	struct hdaa_audio_as *as = (struct hdaa_audio_as *)oidp->oid_arg1;
	struct hdaa_pcm_devinfo *pdevinfo = as->pdevinfo;
	struct hdaa_devinfo *devinfo = pdevinfo->devinfo;
	struct hdaa_chan *ch;
	int error, val, i;
	uint32_t pcmcap;

	ch = &devinfo->chans[as->chans[0]];
	val = (ch->bit32 == 4) ? 32 : ((ch->bit32 == 3) ? 24 :
	    ((ch->bit32 == 2) ?
20 : 0)); error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); pcmcap = ch->supp_pcm_size_rate; if (val == 32 && HDA_PARAM_SUPP_PCM_SIZE_RATE_32BIT(pcmcap)) ch->bit32 = 4; else if (val == 24 && HDA_PARAM_SUPP_PCM_SIZE_RATE_24BIT(pcmcap)) ch->bit32 = 3; else if (val == 20 && HDA_PARAM_SUPP_PCM_SIZE_RATE_20BIT(pcmcap)) ch->bit32 = 2; else return (EINVAL); for (i = 1; i < as->num_chans; i++) devinfo->chans[as->chans[i]].bit32 = ch->bit32; return (0); } static int hdaa_pcm_probe(device_t dev) { struct hdaa_pcm_devinfo *pdevinfo = (struct hdaa_pcm_devinfo *)device_get_ivars(dev); struct hdaa_devinfo *devinfo = pdevinfo->devinfo; const char *pdesc; char chans1[8], chans2[8]; char buf[128]; int loc1, loc2, t1, t2; if (pdevinfo->playas >= 0) loc1 = devinfo->as[pdevinfo->playas].location; else loc1 = devinfo->as[pdevinfo->recas].location; if (pdevinfo->recas >= 0) loc2 = devinfo->as[pdevinfo->recas].location; else loc2 = loc1; if (loc1 != loc2) loc1 = -2; if (loc1 >= 0 && HDA_LOCS[loc1][0] == '0') loc1 = -2; chans1[0] = 0; chans2[0] = 0; t1 = t2 = -1; if (pdevinfo->playas >= 0) { hdaa_chan_formula(devinfo, pdevinfo->playas, chans1, sizeof(chans1)); t1 = hdaa_chan_type(devinfo, pdevinfo->playas); } if (pdevinfo->recas >= 0) { hdaa_chan_formula(devinfo, pdevinfo->recas, chans2, sizeof(chans2)); t2 = hdaa_chan_type(devinfo, pdevinfo->recas); } if (chans1[0] != 0 || chans2[0] != 0) { if (chans1[0] == 0 && pdevinfo->playas >= 0) snprintf(chans1, sizeof(chans1), "2.0"); else if (chans2[0] == 0 && pdevinfo->recas >= 0) snprintf(chans2, sizeof(chans2), "2.0"); if (strcmp(chans1, chans2) == 0) chans2[0] = 0; } if (t1 == -1) t1 = t2; else if (t2 == -1) t2 = t1; if (t1 != t2) t1 = -2; if (pdevinfo->digital) t1 = -2; pdesc = device_get_desc(device_get_parent(dev)); snprintf(buf, sizeof(buf), "%.*s (%s%s%s%s%s%s%s%s%s)", (int)(strlen(pdesc) - 21), pdesc, loc1 >= 0 ? HDA_LOCS[loc1] : "", loc1 >= 0 ? " " : "", (pdevinfo->digital == 0x7)?"HDMI/DP": ((pdevinfo->digital == 0x5)?"DisplayPort": ((pdevinfo->digital == 0x3)?"HDMI": ((pdevinfo->digital)?"Digital":"Analog"))), chans1[0] ? " " : "", chans1, chans2[0] ? "/" : "", chans2, t1 >= 0 ? " " : "", t1 >= 0 ? 
HDA_DEVS[t1] : ""); device_set_desc_copy(dev, buf); return (BUS_PROBE_SPECIFIC); } static int hdaa_pcm_attach(device_t dev) { struct hdaa_pcm_devinfo *pdevinfo = (struct hdaa_pcm_devinfo *)device_get_ivars(dev); struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_audio_as *as; struct snddev_info *d; char status[SND_STATUSLEN]; int i; pdevinfo->chan_size = pcm_getbuffersize(dev, HDA_BUFSZ_MIN, HDA_BUFSZ_DEFAULT, HDA_BUFSZ_MAX); HDA_BOOTVERBOSE( hdaa_dump_dac(pdevinfo); hdaa_dump_adc(pdevinfo); hdaa_dump_mix(pdevinfo); hdaa_dump_ctls(pdevinfo, "Master Volume", SOUND_MASK_VOLUME); hdaa_dump_ctls(pdevinfo, "PCM Volume", SOUND_MASK_PCM); hdaa_dump_ctls(pdevinfo, "CD Volume", SOUND_MASK_CD); hdaa_dump_ctls(pdevinfo, "Microphone Volume", SOUND_MASK_MIC); hdaa_dump_ctls(pdevinfo, "Microphone2 Volume", SOUND_MASK_MONITOR); hdaa_dump_ctls(pdevinfo, "Line-in Volume", SOUND_MASK_LINE); hdaa_dump_ctls(pdevinfo, "Speaker/Beep Volume", SOUND_MASK_SPEAKER); hdaa_dump_ctls(pdevinfo, "Recording Level", SOUND_MASK_RECLEV); hdaa_dump_ctls(pdevinfo, "Input Mix Level", SOUND_MASK_IMIX); hdaa_dump_ctls(pdevinfo, "Input Monitoring Level", SOUND_MASK_IGAIN); hdaa_dump_ctls(pdevinfo, NULL, 0); ); if (resource_int_value(device_get_name(dev), device_get_unit(dev), "blocksize", &i) == 0 && i > 0) { i &= HDA_BLK_ALIGN; if (i < HDA_BLK_MIN) i = HDA_BLK_MIN; pdevinfo->chan_blkcnt = pdevinfo->chan_size / i; i = 0; while (pdevinfo->chan_blkcnt >> i) i++; pdevinfo->chan_blkcnt = 1 << (i - 1); if (pdevinfo->chan_blkcnt < HDA_BDL_MIN) pdevinfo->chan_blkcnt = HDA_BDL_MIN; else if (pdevinfo->chan_blkcnt > HDA_BDL_MAX) pdevinfo->chan_blkcnt = HDA_BDL_MAX; } else pdevinfo->chan_blkcnt = HDA_BDL_DEFAULT; /* * We don't register interrupt handler with snd_setup_intr * in pcm device. Mark pcm device as MPSAFE manually. 
*/ pcm_setflags(dev, pcm_getflags(dev) | SD_F_MPSAFE); HDA_BOOTHVERBOSE( device_printf(dev, "OSS mixer initialization...\n"); ); if (mixer_init(dev, &hdaa_audio_ctl_ossmixer_class, pdevinfo) != 0) device_printf(dev, "Can't register mixer\n"); HDA_BOOTHVERBOSE( device_printf(dev, "Registering PCM channels...\n"); ); if (pcm_register(dev, pdevinfo, (pdevinfo->playas >= 0)?1:0, (pdevinfo->recas >= 0)?1:0) != 0) device_printf(dev, "Can't register PCM\n"); pdevinfo->registered++; d = device_get_softc(dev); if (pdevinfo->playas >= 0) { as = &devinfo->as[pdevinfo->playas]; for (i = 0; i < as->num_chans; i++) pcm_addchan(dev, PCMDIR_PLAY, &hdaa_channel_class, &devinfo->chans[as->chans[i]]); SYSCTL_ADD_PROC(&d->play_sysctl_ctx, SYSCTL_CHILDREN(d->play_sysctl_tree), OID_AUTO, "32bit", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, as, sizeof(as), hdaa_sysctl_32bit, "I", "Resolution of 32bit samples (20/24/32bit)"); } if (pdevinfo->recas >= 0) { as = &devinfo->as[pdevinfo->recas]; for (i = 0; i < as->num_chans; i++) pcm_addchan(dev, PCMDIR_REC, &hdaa_channel_class, &devinfo->chans[as->chans[i]]); SYSCTL_ADD_PROC(&d->rec_sysctl_ctx, SYSCTL_CHILDREN(d->rec_sysctl_tree), OID_AUTO, "32bit", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, as, sizeof(as), hdaa_sysctl_32bit, "I", "Resolution of 32bit samples (20/24/32bit)"); pdevinfo->autorecsrc = 2; resource_int_value(device_get_name(dev), device_get_unit(dev), "rec.autosrc", &pdevinfo->autorecsrc); SYSCTL_ADD_INT(&d->rec_sysctl_ctx, SYSCTL_CHILDREN(d->rec_sysctl_tree), OID_AUTO, "autosrc", CTLFLAG_RW, &pdevinfo->autorecsrc, 0, "Automatic recording source selection"); } if (pdevinfo->mixer != NULL) { hdaa_audio_ctl_set_defaults(pdevinfo); hdaa_lock(devinfo); if (pdevinfo->playas >= 0) { as = &devinfo->as[pdevinfo->playas]; hdaa_channels_handler(as); } if (pdevinfo->recas >= 0) { as = &devinfo->as[pdevinfo->recas]; hdaa_autorecsrc_handler(as, NULL); hdaa_channels_handler(as); } hdaa_unlock(devinfo); } snprintf(status, SND_STATUSLEN, "on %s %s", device_get_nameunit(device_get_parent(dev)), PCM_KLDSTRING(snd_hda)); pcm_setstatus(dev, status); return (0); } static int hdaa_pcm_detach(device_t dev) { struct hdaa_pcm_devinfo *pdevinfo = (struct hdaa_pcm_devinfo *)device_get_ivars(dev); int err; if (pdevinfo->registered > 0) { err = pcm_unregister(dev); if (err != 0) return (err); } return (0); } static device_method_t hdaa_pcm_methods[] = { /* device interface */ DEVMETHOD(device_probe, hdaa_pcm_probe), DEVMETHOD(device_attach, hdaa_pcm_attach), DEVMETHOD(device_detach, hdaa_pcm_detach), DEVMETHOD_END }; static driver_t hdaa_pcm_driver = { "pcm", hdaa_pcm_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_hda_pcm, hdaa, hdaa_pcm_driver, pcm_devclass, NULL, NULL); MODULE_DEPEND(snd_hda, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_hda, 1); diff --git a/sys/dev/sound/pci/hda/hdac.c b/sys/dev/sound/pci/hda/hdac.c index ec907715c5b1..b1fb193595fe 100644 --- a/sys/dev/sound/pci/hda/hdac.c +++ b/sys/dev/sound/pci/hda/hdac.c @@ -1,2164 +1,2163 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Stephane E. Potvin * Copyright (c) 2006 Ariff Abdullah * Copyright (c) 2008-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Intel High Definition Audio (Controller) driver for FreeBSD. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include #include #include #include #include #define HDA_DRV_TEST_REV "20120126_0002" SND_DECLARE_FILE("$FreeBSD$"); #define hdac_lock(sc) snd_mtxlock((sc)->lock) #define hdac_unlock(sc) snd_mtxunlock((sc)->lock) #define hdac_lockassert(sc) snd_mtxassert((sc)->lock) #define HDAC_QUIRK_64BIT (1 << 0) #define HDAC_QUIRK_DMAPOS (1 << 1) #define HDAC_QUIRK_MSI (1 << 2) static const struct { const char *key; uint32_t value; } hdac_quirks_tab[] = { { "64bit", HDAC_QUIRK_64BIT }, { "dmapos", HDAC_QUIRK_DMAPOS }, { "msi", HDAC_QUIRK_MSI }, }; MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller"); static const struct { uint32_t model; const char *desc; char quirks_on; char quirks_off; } hdac_devices[] = { { HDA_INTEL_OAK, "Intel Oaktrail", 0, 0 }, { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, { HDA_INTEL_BAY, "Intel BayTrail", 0, 0 }, { HDA_INTEL_HSW1, "Intel Haswell", 0, 0 }, { HDA_INTEL_HSW2, "Intel Haswell", 0, 0 }, { HDA_INTEL_HSW3, "Intel Haswell", 0, 0 }, { HDA_INTEL_BDW1, "Intel Broadwell", 0, 0 }, { HDA_INTEL_BDW2, "Intel Broadwell", 0, 0 }, { HDA_INTEL_BXTNT, "Intel Broxton-T", 0, 0 }, { HDA_INTEL_CPT, "Intel Cougar Point", 0, 0 }, { HDA_INTEL_PATSBURG,"Intel Patsburg", 0, 0 }, { HDA_INTEL_PPT1, "Intel Panther Point", 0, 0 }, { HDA_INTEL_BR, "Intel Braswell", 0, 0 }, { HDA_INTEL_LPT1, "Intel Lynx Point", 0, 0 }, { HDA_INTEL_LPT2, "Intel Lynx Point", 0, 0 }, { HDA_INTEL_WCPT, "Intel Wildcat Point", 0, 0 }, { HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 }, { HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 }, { HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 }, { HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 }, { HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 }, { HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 }, { HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 }, { HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 }, { HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 }, { HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 }, { HDA_INTEL_CMLKS, "Intel Comet Lake-S", 0, 0 }, { HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 }, { HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 }, { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, { HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 }, { HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 }, { HDA_INTEL_82801F, "Intel 82801F", 0, 0 }, { HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 }, { 
HDA_INTEL_82801G, "Intel 82801G", 0, 0 }, { HDA_INTEL_82801H, "Intel 82801H", 0, 0 }, { HDA_INTEL_82801I, "Intel 82801I", 0, 0 }, { HDA_INTEL_JLK, "Intel Jasper Lake", 0, 0 }, { HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 }, { HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 }, { HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 }, { HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 }, { HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 }, { HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 }, { HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 }, { HDA_INTEL_SCH, "Intel SCH", 0, 0 }, { HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 }, { HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 }, { HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 }, { HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 }, { HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 }, { HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 }, { HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 }, { HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 }, { HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, { HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, { HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, { HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, { HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 }, { HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 }, { HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 }, { HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 }, { HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 }, { HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 }, { HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 }, { HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 }, { HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 }, { HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, { HDA_ATI_SB450, "ATI SB450", 0, 0 }, { HDA_ATI_SB600, "ATI SB600", 0, 0 }, { HDA_ATI_RS600, "ATI RS600", 0, 0 }, { HDA_ATI_RS690, "ATI RS690", 0, 0 }, { HDA_ATI_RS780, "ATI RS780", 0, 0 }, { HDA_ATI_R600, "ATI R600", 0, 0 }, { HDA_ATI_RV610, "ATI RV610", 0, 0 }, { HDA_ATI_RV620, "ATI RV620", 0, 0 }, { HDA_ATI_RV630, "ATI RV630", 0, 0 }, { HDA_ATI_RV635, "ATI RV635", 0, 0 }, { HDA_ATI_RV710, "ATI RV710", 0, 0 }, { HDA_ATI_RV730, "ATI RV730", 0, 0 }, { HDA_ATI_RV740, "ATI RV740", 0, 0 }, { HDA_ATI_RV770, "ATI RV770", 0, 0 }, { HDA_ATI_RV810, "ATI RV810", 0, 0 }, { HDA_ATI_RV830, "ATI RV830", 0, 0 }, { HDA_ATI_RV840, "ATI RV840", 0, 0 }, { HDA_ATI_RV870, "ATI RV870", 0, 0 }, { HDA_ATI_RV910, "ATI RV910", 0, 0 }, { HDA_ATI_RV930, "ATI RV930", 0, 0 }, { HDA_ATI_RV940, "ATI RV940", 0, 0 }, { HDA_ATI_RV970, "ATI RV970", 0, 0 }, { HDA_ATI_R1000, "ATI R1000", 0, 0 }, { HDA_AMD_X370, "AMD X370", 0, 0 }, { HDA_AMD_X570, "AMD X570", 0, 0 }, { HDA_AMD_STONEY, "AMD Stoney", 0, 0 }, { HDA_AMD_RAVEN, "AMD Raven", 0, 0 }, { HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 }, { HDA_RDC_M3010, "RDC M3010", 0, 0 }, { HDA_VIA_VT82XX, "VIA VT8251/8237A",0, 0 }, { HDA_SIS_966, "SiS 966/968", 0, 0 }, { HDA_ULI_M5461, "ULI M5461", 0, 0 }, /* Unknown */ { HDA_INTEL_ALL, "Intel", 0, 0 }, { HDA_NVIDIA_ALL, "NVIDIA", 
0, 0 }, { HDA_ATI_ALL, "ATI", 0, 0 }, { HDA_AMD_ALL, "AMD", 0, 0 }, { HDA_CREATIVE_ALL, "Creative", 0, 0 }, { HDA_VIA_ALL, "VIA", 0, 0 }, { HDA_SIS_ALL, "SiS", 0, 0 }, { HDA_ULI_ALL, "ULI", 0, 0 }, }; static const struct { uint16_t vendor; uint8_t reg; uint8_t mask; uint8_t enable; } hdac_pcie_snoop[] = { { INTEL_VENDORID, 0x00, 0x00, 0x00 }, { ATI_VENDORID, 0x42, 0xf8, 0x02 }, { AMD_VENDORID, 0x42, 0xf8, 0x02 }, { NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f }, }; /**************************************************************************** * Function prototypes ****************************************************************************/ static void hdac_intr_handler(void *); static int hdac_reset(struct hdac_softc *, bool); static int hdac_get_capabilities(struct hdac_softc *); static void hdac_dma_cb(void *, bus_dma_segment_t *, int, int); static int hdac_dma_alloc(struct hdac_softc *, struct hdac_dma *, bus_size_t); static void hdac_dma_free(struct hdac_softc *, struct hdac_dma *); static int hdac_mem_alloc(struct hdac_softc *); static void hdac_mem_free(struct hdac_softc *); static int hdac_irq_alloc(struct hdac_softc *); static void hdac_irq_free(struct hdac_softc *); static void hdac_corb_init(struct hdac_softc *); static void hdac_rirb_init(struct hdac_softc *); static void hdac_corb_start(struct hdac_softc *); static void hdac_rirb_start(struct hdac_softc *); static void hdac_attach2(void *); static uint32_t hdac_send_command(struct hdac_softc *, nid_t, uint32_t); static int hdac_probe(device_t); static int hdac_attach(device_t); static int hdac_detach(device_t); static int hdac_suspend(device_t); static int hdac_resume(device_t); static int hdac_rirb_flush(struct hdac_softc *sc); static int hdac_unsolq_flush(struct hdac_softc *sc); /* This function surely going to make its way into upper level someday. */ static void hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off) { const char *res = NULL; int i = 0, j, k, len, inv; if (resource_string_value(device_get_name(sc->dev), device_get_unit(sc->dev), "config", &res) != 0) return; if (!(res != NULL && strlen(res) > 0)) return; HDA_BOOTVERBOSE( device_printf(sc->dev, "Config options:"); ); for (;;) { while (res[i] != '\0' && (res[i] == ',' || isspace(res[i]) != 0)) i++; if (res[i] == '\0') { HDA_BOOTVERBOSE( printf("\n"); ); return; } j = i; while (res[j] != '\0' && !(res[j] == ',' || isspace(res[j]) != 0)) j++; len = j - i; if (len > 2 && strncmp(res + i, "no", 2) == 0) inv = 2; else inv = 0; for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) { if (strncmp(res + i + inv, hdac_quirks_tab[k].key, len - inv) != 0) continue; if (len - inv != strlen(hdac_quirks_tab[k].key)) continue; HDA_BOOTVERBOSE( printf(" %s%s", (inv != 0) ? "no" : "", hdac_quirks_tab[k].key); ); if (inv == 0) { *on |= hdac_quirks_tab[k].value; *off &= ~hdac_quirks_tab[k].value; } else if (inv != 0) { *off |= hdac_quirks_tab[k].value; *on &= ~hdac_quirks_tab[k].value; } break; } i = j; } } static void hdac_one_intr(struct hdac_softc *sc, uint32_t intsts) { device_t dev; uint8_t rirbsts; int i; /* Was this a controller interrupt? */ if (intsts & HDAC_INTSTS_CIS) { /* * Placeholder: if we ever enable any bits in HDAC_WAKEEN, then * we will need to check and clear HDAC_STATESTS. * That event is used to report codec status changes such as * a reset or a wake-up event. */ /* * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we * will need to check and clear HDAC_CORBSTS_CMEI in * HDAC_CORBSTS. * That event is used to report CORB memory errors. 
*/ /* * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then we * will need to check and clear HDAC_RIRBSTS_RIRBOIS in * HDAC_RIRBSTS. * That event is used to report response FIFO overruns. */ /* Get as many responses that we can */ rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); while (rirbsts & HDAC_RIRBSTS_RINTFL) { HDAC_WRITE_1(&sc->mem, HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL); hdac_rirb_flush(sc); rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); } if (sc->unsolq_rp != sc->unsolq_wp) taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); } if (intsts & HDAC_INTSTS_SIS_MASK) { for (i = 0; i < sc->num_ss; i++) { if ((intsts & (1 << i)) == 0) continue; HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS, HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); if ((dev = sc->streams[i].dev) != NULL) { HDAC_STREAM_INTR(dev, sc->streams[i].dir, sc->streams[i].stream); } } } } /**************************************************************************** * void hdac_intr_handler(void *) * * Interrupt handler. Processes interrupts received from the hdac. ****************************************************************************/ static void hdac_intr_handler(void *context) { struct hdac_softc *sc; uint32_t intsts; sc = (struct hdac_softc *)context; /* * Loop until HDAC_INTSTS_GIS gets clear. * It is plausible that hardware interrupts a host only when GIS goes * from zero to one. GIS is formed by OR-ing multiple hardware * statuses, so it's possible that a previously cleared status gets set * again while another status has not been cleared yet. Thus, there * will be no new interrupt as GIS always stayed set. If we don't * re-examine GIS then we can leave it set and never get an interrupt * again. */ intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); while ((intsts & HDAC_INTSTS_GIS) != 0) { hdac_lock(sc); hdac_one_intr(sc, intsts); hdac_unlock(sc); intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); } } static void hdac_poll_callback(void *arg) { struct hdac_softc *sc = arg; if (sc == NULL) return; hdac_lock(sc); if (sc->polling == 0) { hdac_unlock(sc); return; } callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc); hdac_unlock(sc); hdac_intr_handler(sc); } /**************************************************************************** * int hdac_reset(hdac_softc *, bool) * * Reset the hdac to a quiescent and known state. ****************************************************************************/ static int hdac_reset(struct hdac_softc *sc, bool wakeup) { uint32_t gctl; int count, i; /* * Stop all Streams DMA engine */ for (i = 0; i < sc->num_iss; i++) HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0); for (i = 0; i < sc->num_oss; i++) HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0); for (i = 0; i < sc->num_bss; i++) HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0); /* * Stop Control DMA engines. */ HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0); HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0); /* * Reset DMA position buffer. */ HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0); HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0); /* * Reset the controller. The reset must remain asserted for * a minimum of 100us. */ gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST); count = 10000; do { gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); if (!(gctl & HDAC_GCTL_CRST)) break; DELAY(10); } while (--count); if (gctl & HDAC_GCTL_CRST) { device_printf(sc->dev, "Unable to put hdac in reset\n"); return (ENXIO); } /* If wakeup is not requested - leave the controller in reset state. 
*/ if (!wakeup) return (0); DELAY(100); gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST); count = 10000; do { gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); if (gctl & HDAC_GCTL_CRST) break; DELAY(10); } while (--count); if (!(gctl & HDAC_GCTL_CRST)) { device_printf(sc->dev, "Device stuck in reset\n"); return (ENXIO); } /* * Wait for codecs to finish their own reset sequence. The delay here * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery). */ DELAY(1000); return (0); } /**************************************************************************** * int hdac_get_capabilities(struct hdac_softc *); * * Retreive the general capabilities of the hdac; * Number of Input Streams * Number of Output Streams * Number of bidirectional Streams * 64bit ready * CORB and RIRB sizes ****************************************************************************/ static int hdac_get_capabilities(struct hdac_softc *sc) { uint16_t gcap; uint8_t corbsize, rirbsize; gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP); sc->num_iss = HDAC_GCAP_ISS(gcap); sc->num_oss = HDAC_GCAP_OSS(gcap); sc->num_bss = HDAC_GCAP_BSS(gcap); sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss; sc->num_sdo = HDAC_GCAP_NSDO(gcap); sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0; if (sc->quirks_on & HDAC_QUIRK_64BIT) sc->support_64bit = 1; else if (sc->quirks_off & HDAC_QUIRK_64BIT) sc->support_64bit = 0; corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE); if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) == HDAC_CORBSIZE_CORBSZCAP_256) sc->corb_size = 256; else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) == HDAC_CORBSIZE_CORBSZCAP_16) sc->corb_size = 16; else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) == HDAC_CORBSIZE_CORBSZCAP_2) sc->corb_size = 2; else { device_printf(sc->dev, "%s: Invalid corb size (%x)\n", __func__, corbsize); return (ENXIO); } rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE); if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) == HDAC_RIRBSIZE_RIRBSZCAP_256) sc->rirb_size = 256; else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) == HDAC_RIRBSIZE_RIRBSZCAP_16) sc->rirb_size = 16; else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) == HDAC_RIRBSIZE_RIRBSZCAP_2) sc->rirb_size = 2; else { device_printf(sc->dev, "%s: Invalid rirb size (%x)\n", __func__, rirbsize); return (ENXIO); } HDA_BOOTVERBOSE( device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, " "NSDO %d%s, CORB %d, RIRB %d\n", sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo, sc->support_64bit ? ", 64bit" : "", sc->corb_size, sc->rirb_size); ); return (0); } /**************************************************************************** * void hdac_dma_cb * * This function is called by bus_dmamap_load when the mapping has been * established. We just record the physical address of the mapping into * the struct hdac_dma passed in. ****************************************************************************/ static void hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error) { struct hdac_dma *dma; if (error == 0) { dma = (struct hdac_dma *)callback_arg; dma->dma_paddr = segs[0].ds_addr; } } /**************************************************************************** * int hdac_dma_alloc * * This function allocate and setup a dma region (struct hdac_dma). * It must be freed by a corresponding hdac_dma_free. 
****************************************************************************/ static int hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size) { bus_size_t roundsz; int result; roundsz = roundup2(size, HDA_DMA_ALIGNMENT); bzero(dma, sizeof(*dma)); /* * Create a DMA tag */ result = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* parent */ HDA_DMA_ALIGNMENT, /* alignment */ 0, /* boundary */ (sc->support_64bit) ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, /* filtfunc */ NULL, /* fistfuncarg */ roundsz, /* maxsize */ 1, /* nsegments */ roundsz, /* maxsegsz */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &dma->dma_tag); /* dmat */ if (result != 0) { device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n", __func__, result); goto hdac_dma_alloc_fail; } /* * Allocate DMA memory */ result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO | ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE : BUS_DMA_COHERENT), &dma->dma_map); if (result != 0) { device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n", __func__, result); goto hdac_dma_alloc_fail; } dma->dma_size = roundsz; /* * Map the memory */ result = bus_dmamap_load(dma->dma_tag, dma->dma_map, (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0); if (result != 0 || dma->dma_paddr == 0) { if (result == 0) result = ENOMEM; device_printf(sc->dev, "%s: bus_dmamem_load failed (%d)\n", __func__, result); goto hdac_dma_alloc_fail; } HDA_BOOTHVERBOSE( device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n", __func__, (uintmax_t)size, (uintmax_t)roundsz); ); return (0); hdac_dma_alloc_fail: hdac_dma_free(sc, dma); return (result); } /**************************************************************************** * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *) * * Free a struct hdac_dma that has been previously allocated via the * hdac_dma_alloc function. ****************************************************************************/ static void hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma) { if (dma->dma_paddr != 0) { /* Flush caches */ bus_dmamap_sync(dma->dma_tag, dma->dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->dma_tag, dma->dma_map); dma->dma_paddr = 0; } if (dma->dma_vaddr != NULL) { bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); dma->dma_vaddr = NULL; } if (dma->dma_tag != NULL) { bus_dma_tag_destroy(dma->dma_tag); dma->dma_tag = NULL; } dma->dma_size = 0; } /**************************************************************************** * int hdac_mem_alloc(struct hdac_softc *) * * Allocate all the bus resources necessary to speak with the physical * controller. ****************************************************************************/ static int hdac_mem_alloc(struct hdac_softc *sc) { struct hdac_mem *mem; mem = &sc->mem; mem->mem_rid = PCIR_BAR(0); mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &mem->mem_rid, RF_ACTIVE); if (mem->mem_res == NULL) { device_printf(sc->dev, "%s: Unable to allocate memory resource\n", __func__); return (ENOMEM); } mem->mem_tag = rman_get_bustag(mem->mem_res); mem->mem_handle = rman_get_bushandle(mem->mem_res); return (0); } /**************************************************************************** * void hdac_mem_free(struct hdac_softc *) * * Free up resources previously allocated by hdac_mem_alloc. 
****************************************************************************/ static void hdac_mem_free(struct hdac_softc *sc) { struct hdac_mem *mem; mem = &sc->mem; if (mem->mem_res != NULL) bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid, mem->mem_res); mem->mem_res = NULL; } /**************************************************************************** * int hdac_irq_alloc(struct hdac_softc *) * * Allocate and setup the resources necessary for interrupt handling. ****************************************************************************/ static int hdac_irq_alloc(struct hdac_softc *sc) { struct hdac_irq *irq; int result; irq = &sc->irq; irq->irq_rid = 0x0; if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 && (result = pci_msi_count(sc->dev)) == 1 && pci_alloc_msi(sc->dev, &result) == 0) irq->irq_rid = 0x1; irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE); if (irq->irq_res == NULL) { device_printf(sc->dev, "%s: Unable to allocate irq\n", __func__); goto hdac_irq_alloc_fail; } result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV, NULL, hdac_intr_handler, sc, &irq->irq_handle); if (result != 0) { device_printf(sc->dev, "%s: Unable to setup interrupt handler (%d)\n", __func__, result); goto hdac_irq_alloc_fail; } return (0); hdac_irq_alloc_fail: hdac_irq_free(sc); return (ENXIO); } /**************************************************************************** * void hdac_irq_free(struct hdac_softc *) * * Free up resources previously allocated by hdac_irq_alloc. ****************************************************************************/ static void hdac_irq_free(struct hdac_softc *sc) { struct hdac_irq *irq; irq = &sc->irq; if (irq->irq_res != NULL && irq->irq_handle != NULL) bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle); if (irq->irq_res != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid, irq->irq_res); if (irq->irq_rid == 0x1) pci_release_msi(sc->dev); irq->irq_handle = NULL; irq->irq_res = NULL; irq->irq_rid = 0x0; } /**************************************************************************** * void hdac_corb_init(struct hdac_softc *) * * Initialize the corb registers for operations but do not start it up yet. * The CORB engine must not be running when this function is called. ****************************************************************************/ static void hdac_corb_init(struct hdac_softc *sc) { uint8_t corbsize; uint64_t corbpaddr; /* Setup the CORB size. */ switch (sc->corb_size) { case 256: corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256); break; case 16: corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16); break; case 2: corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2); break; default: panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size); } HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize); /* Setup the CORB Address in the hdac */ corbpaddr = (uint64_t)sc->corb_dma.dma_paddr; HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr); HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32)); /* Set the WP and RP */ sc->corb_wp = 0; HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST); /* * The HDA specification indicates that the CORBRPRST bit will always * read as zero. Unfortunately, it seems that at least the 82801G * doesn't reset the bit to zero, which stalls the corb engine. * manually reset the bit to zero before continuing. 
*/ HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0); /* Enable CORB error reporting */ #if 0 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE); #endif } /**************************************************************************** * void hdac_rirb_init(struct hdac_softc *) * * Initialize the rirb registers for operations but do not start it up yet. * The RIRB engine must not be running when this function is called. ****************************************************************************/ static void hdac_rirb_init(struct hdac_softc *sc) { uint8_t rirbsize; uint64_t rirbpaddr; /* Setup the RIRB size. */ switch (sc->rirb_size) { case 256: rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256); break; case 16: rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16); break; case 2: rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2); break; default: panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size); } HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize); /* Setup the RIRB Address in the hdac */ rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr; HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr); HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32)); /* Setup the WP and RP */ sc->rirb_rp = 0; HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST); /* Setup the interrupt threshold */ HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2); /* Enable Overrun and response received reporting */ #if 0 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL); #else HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL); #endif /* * Make sure that the Host CPU cache doesn't contain any dirty * cache lines that falls in the rirb. If I understood correctly, it * should be sufficient to do this only once as the rirb is purely * read-only from now on. 
*/ bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, BUS_DMASYNC_PREREAD); } /**************************************************************************** * void hdac_corb_start(hdac_softc *) * * Startup the corb DMA engine ****************************************************************************/ static void hdac_corb_start(struct hdac_softc *sc) { uint32_t corbctl; corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL); corbctl |= HDAC_CORBCTL_CORBRUN; HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl); } /**************************************************************************** * void hdac_rirb_start(hdac_softc *) * * Startup the rirb DMA engine ****************************************************************************/ static void hdac_rirb_start(struct hdac_softc *sc) { uint32_t rirbctl; rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL); rirbctl |= HDAC_RIRBCTL_RIRBDMAEN; HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl); } static int hdac_rirb_flush(struct hdac_softc *sc) { struct hdac_rirb *rirb_base, *rirb; nid_t cad; uint32_t resp, resp_ex; uint8_t rirbwp; int ret; rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr; rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP); bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, BUS_DMASYNC_POSTREAD); ret = 0; while (sc->rirb_rp != rirbwp) { sc->rirb_rp++; sc->rirb_rp %= sc->rirb_size; rirb = &rirb_base[sc->rirb_rp]; resp = le32toh(rirb->response); resp_ex = le32toh(rirb->response_ex); cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex); if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) { sc->unsolq[sc->unsolq_wp++] = resp; sc->unsolq_wp %= HDAC_UNSOLQ_MAX; sc->unsolq[sc->unsolq_wp++] = cad; sc->unsolq_wp %= HDAC_UNSOLQ_MAX; } else if (sc->codecs[cad].pending <= 0) { device_printf(sc->dev, "Unexpected unsolicited " "response from address %d: %08x\n", cad, resp); } else { sc->codecs[cad].response = resp; sc->codecs[cad].pending--; } ret++; } bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, BUS_DMASYNC_PREREAD); return (ret); } static int hdac_unsolq_flush(struct hdac_softc *sc) { device_t child; nid_t cad; uint32_t resp; int ret = 0; if (sc->unsolq_st == HDAC_UNSOLQ_READY) { sc->unsolq_st = HDAC_UNSOLQ_BUSY; while (sc->unsolq_rp != sc->unsolq_wp) { resp = sc->unsolq[sc->unsolq_rp++]; sc->unsolq_rp %= HDAC_UNSOLQ_MAX; cad = sc->unsolq[sc->unsolq_rp++]; sc->unsolq_rp %= HDAC_UNSOLQ_MAX; if ((child = sc->codecs[cad].dev) != NULL && device_is_attached(child)) HDAC_UNSOL_INTR(child, resp); ret++; } sc->unsolq_st = HDAC_UNSOLQ_READY; } return (ret); } /**************************************************************************** * uint32_t hdac_send_command * * Wrapper function that sends only one command to a given codec ****************************************************************************/ static uint32_t hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb) { int timeout; uint32_t *corb; hdac_lockassert(sc); verb &= ~HDA_CMD_CAD_MASK; verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT; sc->codecs[cad].response = HDA_INVALID; sc->codecs[cad].pending++; sc->corb_wp++; sc->corb_wp %= sc->corb_size; corb = (uint32_t *)sc->corb_dma.dma_vaddr; bus_dmamap_sync(sc->corb_dma.dma_tag, sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE); corb[sc->corb_wp] = htole32(verb); bus_dmamap_sync(sc->corb_dma.dma_tag, sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE); HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); timeout = 10000; do { if (hdac_rirb_flush(sc) == 0) DELAY(10); } while (sc->codecs[cad].pending != 0 && --timeout); if (sc->codecs[cad].pending 
!= 0) { device_printf(sc->dev, "Command 0x%08x timeout on address %d\n", verb, cad); sc->codecs[cad].pending = 0; } if (sc->unsolq_rp != sc->unsolq_wp) taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); return (sc->codecs[cad].response); } /**************************************************************************** * Device Methods ****************************************************************************/ /**************************************************************************** * int hdac_probe(device_t) * * Probe for the presence of an hdac. If none is found, check for a generic * match using the subclass of the device. ****************************************************************************/ static int hdac_probe(device_t dev) { int i, result; uint32_t model; uint16_t class, subclass; char desc[64]; model = (uint32_t)pci_get_device(dev) << 16; model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; class = pci_get_class(dev); subclass = pci_get_subclass(dev); bzero(desc, sizeof(desc)); result = ENXIO; for (i = 0; i < nitems(hdac_devices); i++) { if (hdac_devices[i].model == model) { strlcpy(desc, hdac_devices[i].desc, sizeof(desc)); result = BUS_PROBE_DEFAULT; break; } if (HDA_DEV_MATCH(hdac_devices[i].model, model) && class == PCIC_MULTIMEDIA && subclass == PCIS_MULTIMEDIA_HDA) { snprintf(desc, sizeof(desc), "%s (0x%04x)", hdac_devices[i].desc, pci_get_device(dev)); result = BUS_PROBE_GENERIC; break; } } if (result == ENXIO && class == PCIC_MULTIMEDIA && subclass == PCIS_MULTIMEDIA_HDA) { snprintf(desc, sizeof(desc), "Generic (0x%08x)", model); result = BUS_PROBE_GENERIC; } if (result != ENXIO) { strlcat(desc, " HDA Controller", sizeof(desc)); device_set_desc_copy(dev, desc); } return (result); } static void hdac_unsolq_task(void *context, int pending) { struct hdac_softc *sc; sc = (struct hdac_softc *)context; hdac_lock(sc); hdac_unsolq_flush(sc); hdac_unlock(sc); } /**************************************************************************** * int hdac_attach(device_t) * * Attach the device into the kernel. Interrupts usually won't be enabled * when this function is called. Setup everything that doesn't require * interrupts and defer probing of codecs until interrupts are enabled. 
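A note on the probe matching just above: hdac_probe() folds the PCI device and vendor IDs into one 32-bit model key, and table entries can wildcard either half. A minimal userland sketch of that scheme (model_pack/dev_match and the sample IDs are illustrative names, not driver symbols):

#include <stdint.h>
#include <stdio.h>

/* Pack the IDs the way hdac_probe() builds its key: device ID in the
 * high half, vendor ID in the low half. */
static uint32_t
model_pack(uint16_t device, uint16_t vendor)
{
	return (((uint32_t)device << 16) | vendor);
}

/* In the spirit of HDA_DEV_MATCH: a half equal to 0xffff is a wildcard. */
static int
dev_match(uint32_t key, uint32_t model)
{
	uint32_t mask = 0xffffffff;

	if ((key & 0xffff0000) == 0xffff0000)
		mask &= 0x0000ffff;	/* match any device ID */
	if ((key & 0x0000ffff) == 0x0000ffff)
		mask &= 0xffff0000;	/* match any vendor ID */
	return ((key & mask) == (model & mask));
}

int
main(void)
{
	uint32_t model = model_pack(0x2668, 0x8086);	/* Intel ICH6 HDA */

	printf("%d %d %d\n",
	    dev_match(0x26688086, model),	/* exact entry: 1 */
	    dev_match(0xffff8086, model),	/* any Intel device: 1 */
	    dev_match(0x27d88086, model));	/* other device: 0 */
	return (0);
}

Entries that only wildcard the device ID fall through to the generic multimedia-class match, which is why the probe returns BUS_PROBE_GENERIC for them.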
****************************************************************************/ static int hdac_attach(device_t dev) { struct hdac_softc *sc; int result; int i, devid = -1; uint32_t model; uint16_t class, subclass; uint16_t vendor; uint8_t v; sc = device_get_softc(dev); HDA_BOOTVERBOSE( device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n", pci_get_subvendor(dev), pci_get_subdevice(dev)); device_printf(dev, "HDA Driver Revision: %s\n", HDA_DRV_TEST_REV); ); model = (uint32_t)pci_get_device(dev) << 16; model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; class = pci_get_class(dev); subclass = pci_get_subclass(dev); for (i = 0; i < nitems(hdac_devices); i++) { if (hdac_devices[i].model == model) { devid = i; break; } if (HDA_DEV_MATCH(hdac_devices[i].model, model) && class == PCIC_MULTIMEDIA && subclass == PCIS_MULTIMEDIA_HDA) { devid = i; break; } } sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex"); sc->dev = dev; TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc); callout_init(&sc->poll_callout, 1); for (i = 0; i < HDAC_CODEC_MAX; i++) sc->codecs[i].dev = NULL; if (devid >= 0) { sc->quirks_on = hdac_devices[devid].quirks_on; sc->quirks_off = hdac_devices[devid].quirks_off; } else { sc->quirks_on = 0; sc->quirks_off = 0; } if (resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &i) == 0) { if (i == 0) sc->quirks_off |= HDAC_QUIRK_MSI; else { sc->quirks_on |= HDAC_QUIRK_MSI; sc->quirks_off &= ~HDAC_QUIRK_MSI; } } hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off); HDA_BOOTVERBOSE( device_printf(sc->dev, "Config options: on=0x%08x off=0x%08x\n", sc->quirks_on, sc->quirks_off); ); sc->poll_ival = hz; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "polling", &i) == 0 && i != 0) sc->polling = 1; else sc->polling = 0; pci_enable_busmaster(dev); vendor = pci_get_vendor(dev); if (vendor == INTEL_VENDORID) { /* TCSEL -> TC0 */ v = pci_read_config(dev, 0x44, 1); pci_write_config(dev, 0x44, v & 0xf8, 1); HDA_BOOTHVERBOSE( device_printf(dev, "TCSEL: 0x%02x -> 0x%02x\n", v, pci_read_config(dev, 0x44, 1)); ); } #if defined(__i386__) || defined(__amd64__) sc->flags |= HDAC_F_DMA_NOCACHE; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "snoop", &i) == 0 && i != 0) { #else sc->flags &= ~HDAC_F_DMA_NOCACHE; #endif /* * Try to enable PCIe snoop to avoid messing around with * uncacheable DMA attribute. Since PCIe snoop register * config is pretty much vendor specific, there are no * general solutions on how to enable it, forcing us (even * Microsoft) to enable uncacheable or write combined DMA * by default.
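On the quirk handling above: the driver keeps paired quirks_on/quirks_off masks, and the msi hint has to clear its bit from quirks_off with &= ~HDAC_QUIRK_MSI; OR-ing with the complement would switch on every other quirk bit at once. A toy model of how such paired masks resolve (resolve_quirks() and the bit values are hypothetical, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit assignments; the real HDAC_QUIRK_* values differ. */
#define QUIRK_MSI	(1u << 0)
#define QUIRK_DMAPOS	(1u << 1)

/* Forced-on bits win over defaults; forced-off bits win over everything. */
static uint32_t
resolve_quirks(uint32_t defaults, uint32_t on, uint32_t off)
{
	return ((defaults | on) & ~off);
}

int
main(void)
{
	/* DMAPOS on by default, MSI forced off by a hint. */
	uint32_t q = resolve_quirks(QUIRK_DMAPOS, 0, QUIRK_MSI);

	printf("msi=%s dmapos=%s\n",
	    (q & QUIRK_MSI) ? "on" : "off",
	    (q & QUIRK_DMAPOS) ? "on" : "off");
	return (0);
}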
* * http://msdn2.microsoft.com/en-us/library/ms790324.aspx */ for (i = 0; i < nitems(hdac_pcie_snoop); i++) { if (hdac_pcie_snoop[i].vendor != vendor) continue; sc->flags &= ~HDAC_F_DMA_NOCACHE; if (hdac_pcie_snoop[i].reg == 0x00) break; v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); if ((v & hdac_pcie_snoop[i].enable) == hdac_pcie_snoop[i].enable) break; v &= hdac_pcie_snoop[i].mask; v |= hdac_pcie_snoop[i].enable; pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1); v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); if ((v & hdac_pcie_snoop[i].enable) != hdac_pcie_snoop[i].enable) { HDA_BOOTVERBOSE( device_printf(dev, "WARNING: Failed to enable PCIe " "snoop!\n"); ); #if defined(__i386__) || defined(__amd64__) sc->flags |= HDAC_F_DMA_NOCACHE; #endif } break; } #if defined(__i386__) || defined(__amd64__) } #endif HDA_BOOTHVERBOSE( device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n", (sc->flags & HDAC_F_DMA_NOCACHE) ? "Uncacheable" : "PCIe snoop", vendor); ); /* Allocate resources */ result = hdac_mem_alloc(sc); if (result != 0) goto hdac_attach_fail; result = hdac_irq_alloc(sc); if (result != 0) goto hdac_attach_fail; /* Get Capabilities */ result = hdac_get_capabilities(sc); if (result != 0) goto hdac_attach_fail; /* Allocate CORB, RIRB, POS and BDLs dma memory */ result = hdac_dma_alloc(sc, &sc->corb_dma, sc->corb_size * sizeof(uint32_t)); if (result != 0) goto hdac_attach_fail; result = hdac_dma_alloc(sc, &sc->rirb_dma, sc->rirb_size * sizeof(struct hdac_rirb)); if (result != 0) goto hdac_attach_fail; sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss, M_HDAC, M_ZERO | M_WAITOK); for (i = 0; i < sc->num_ss; i++) { result = hdac_dma_alloc(sc, &sc->streams[i].bdl, sizeof(struct hdac_bdle) * HDA_BDL_MAX); if (result != 0) goto hdac_attach_fail; } if (sc->quirks_on & HDAC_QUIRK_DMAPOS) { if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) { HDA_BOOTVERBOSE( device_printf(dev, "Failed to " "allocate DMA pos buffer " "(non-fatal)\n"); ); } else { uint64_t addr = sc->pos_dma.dma_paddr; HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32); HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, (addr & HDAC_DPLBASE_DPLBASE_MASK) | HDAC_DPLBASE_DPLBASE_DMAPBE); } } result = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* parent */ HDA_DMA_ALIGNMENT, /* alignment */ 0, /* boundary */ (sc->support_64bit) ? 
BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, /* filtfunc */ NULL, /* fistfuncarg */ HDA_BUFSZ_MAX, /* maxsize */ 1, /* nsegments */ HDA_BUFSZ_MAX, /* maxsegsz */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &sc->chan_dmat); /* dmat */ if (result != 0) { device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n", __func__, result); goto hdac_attach_fail; } /* Quiesce everything */ HDA_BOOTHVERBOSE( device_printf(dev, "Reset controller...\n"); ); hdac_reset(sc, true); /* Initialize the CORB and RIRB */ hdac_corb_init(sc); hdac_rirb_init(sc); /* Defer remaining of initialization until interrupts are enabled */ sc->intrhook.ich_func = hdac_attach2; sc->intrhook.ich_arg = (void *)sc; if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) { sc->intrhook.ich_func = NULL; hdac_attach2((void *)sc); } return (0); hdac_attach_fail: hdac_irq_free(sc); if (sc->streams != NULL) for (i = 0; i < sc->num_ss; i++) hdac_dma_free(sc, &sc->streams[i].bdl); free(sc->streams, M_HDAC); hdac_dma_free(sc, &sc->rirb_dma); hdac_dma_free(sc, &sc->corb_dma); hdac_mem_free(sc); snd_mtxfree(sc->lock); return (ENXIO); } static int sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS) { struct hdac_softc *sc; device_t *devlist; device_t dev; int devcount, i, err, val; dev = oidp->oid_arg1; sc = device_get_softc(dev); if (sc == NULL) return (EINVAL); val = 0; err = sysctl_handle_int(oidp, &val, 0, req); if (err != 0 || req->newptr == NULL || val == 0) return (err); /* XXX: Temporary. For debugging. */ if (val == 100) { hdac_suspend(dev); return (0); } else if (val == 101) { hdac_resume(dev); return (0); } if ((err = device_get_children(dev, &devlist, &devcount)) != 0) return (err); hdac_lock(sc); for (i = 0; i < devcount; i++) HDAC_PINDUMP(devlist[i]); hdac_unlock(sc); free(devlist, M_TEMP); return (0); } static int hdac_mdata_rate(uint16_t fmt) { static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 }; int rate, bits; if (fmt & (1 << 14)) rate = 44100; else rate = 48000; rate *= ((fmt >> 11) & 0x07) + 1; rate /= ((fmt >> 8) & 0x07) + 1; bits = mbits[(fmt >> 4) & 0x03]; bits *= (fmt & 0x0f) + 1; return (rate * bits); } static int hdac_bdata_rate(uint16_t fmt, int output) { static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 }; int rate, bits; rate = 48000; rate *= ((fmt >> 11) & 0x07) + 1; bits = bbits[(fmt >> 4) & 0x03]; bits *= (fmt & 0x0f) + 1; if (!output) bits = ((bits + 7) & ~0x07) + 10; return (rate * bits); } static void hdac_poll_reinit(struct hdac_softc *sc) { int i, pollticks, min = 1000000; struct hdac_stream *s; if (sc->polling == 0) return; if (sc->unsol_registered > 0) min = hz / 2; for (i = 0; i < sc->num_ss; i++) { s = &sc->streams[i]; if (s->running == 0) continue; pollticks = ((uint64_t)hz * s->blksz) / (hdac_mdata_rate(s->format) / 8); pollticks >>= 1; if (pollticks > hz) pollticks = hz; if (pollticks < 1) pollticks = 1; if (min > pollticks) min = pollticks; } sc->poll_ival = min; if (min == 1000000) callout_stop(&sc->poll_callout); else callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc); } static int sysctl_hdac_polling(SYSCTL_HANDLER_ARGS) { struct hdac_softc *sc; device_t dev; uint32_t ctl; int err, val; dev = oidp->oid_arg1; sc = device_get_softc(dev); if (sc == NULL) return (EINVAL); hdac_lock(sc); val = sc->polling; hdac_unlock(sc); err = sysctl_handle_int(oidp, &val, 0, req); if (err != 0 || req->newptr == NULL) return (err); if (val < 0 || val > 1) return (EINVAL); hdac_lock(sc); if (val != sc->polling) { if 
(val == 0) { callout_stop(&sc->poll_callout); hdac_unlock(sc); callout_drain(&sc->poll_callout); hdac_lock(sc); sc->polling = 0; ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); ctl |= HDAC_INTCTL_GIE; HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); } else { ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); ctl &= ~HDAC_INTCTL_GIE; HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); sc->polling = 1; hdac_poll_reinit(sc); } } hdac_unlock(sc); return (err); } static void hdac_attach2(void *arg) { struct hdac_softc *sc; device_t child; uint32_t vendorid, revisionid; int i; uint16_t statests; sc = (struct hdac_softc *)arg; hdac_lock(sc); /* Remove ourselves from the config hooks */ if (sc->intrhook.ich_func != NULL) { config_intrhook_disestablish(&sc->intrhook); sc->intrhook.ich_func = NULL; } HDA_BOOTHVERBOSE( device_printf(sc->dev, "Starting CORB Engine...\n"); ); hdac_corb_start(sc); HDA_BOOTHVERBOSE( device_printf(sc->dev, "Starting RIRB Engine...\n"); ); hdac_rirb_start(sc); /* * Clear HDAC_WAKEEN as at present we have no use for SDI wake * (status change) interrupts. The documentation says that we * should not make any assumptions about the state of this register * and set it explicitly. * NB: this needs to be done before the interrupt is enabled as * the handler does not expect this interrupt source. */ HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); /* * Read and clear post-reset SDI wake status. * Each set bit corresponds to a codec that came out of reset. */ statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS); HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests); HDA_BOOTHVERBOSE( device_printf(sc->dev, "Enabling controller interrupt...\n"); ); HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | HDAC_GCTL_UNSOL); if (sc->polling == 0) { HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); } DELAY(1000); HDA_BOOTHVERBOSE( device_printf(sc->dev, "Scanning HDA codecs ...\n"); ); hdac_unlock(sc); for (i = 0; i < HDAC_CODEC_MAX; i++) { if (HDAC_STATESTS_SDIWAKE(statests, i)) { HDA_BOOTHVERBOSE( device_printf(sc->dev, "Found CODEC at address %d\n", i); ); hdac_lock(sc); vendorid = hdac_send_command(sc, i, HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID)); revisionid = hdac_send_command(sc, i, HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID)); hdac_unlock(sc); if (vendorid == HDA_INVALID && revisionid == HDA_INVALID) { device_printf(sc->dev, "CODEC at address %d not responding!\n", i); continue; } sc->codecs[i].vendor_id = HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid); sc->codecs[i].device_id = HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid); sc->codecs[i].revision_id = HDA_PARAM_REVISION_ID_REVISION_ID(revisionid); sc->codecs[i].stepping_id = HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid); child = device_add_child(sc->dev, "hdacc", -1); if (child == NULL) { device_printf(sc->dev, "Failed to add CODEC device\n"); continue; } device_set_ivars(child, (void *)(intptr_t)i); sc->codecs[i].dev = child; } } bus_generic_attach(sc->dev); SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "pindump", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc->dev, sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "polling", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc->dev, sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode"); } /**************************************************************************** * int hdac_suspend(device_t) * 
* Suspend and power down HDA bus and codecs. ****************************************************************************/ static int hdac_suspend(device_t dev) { struct hdac_softc *sc = device_get_softc(dev); HDA_BOOTHVERBOSE( device_printf(dev, "Suspend...\n"); ); bus_generic_suspend(dev); hdac_lock(sc); HDA_BOOTHVERBOSE( device_printf(dev, "Reset controller...\n"); ); callout_stop(&sc->poll_callout); hdac_reset(sc, false); hdac_unlock(sc); callout_drain(&sc->poll_callout); taskqueue_drain(taskqueue_thread, &sc->unsolq_task); HDA_BOOTHVERBOSE( device_printf(dev, "Suspend done\n"); ); return (0); } /**************************************************************************** * int hdac_resume(device_t) * * Powerup and restore HDA bus and codecs state. ****************************************************************************/ static int hdac_resume(device_t dev) { struct hdac_softc *sc = device_get_softc(dev); int error; HDA_BOOTHVERBOSE( device_printf(dev, "Resume...\n"); ); hdac_lock(sc); /* Quiesce everything */ HDA_BOOTHVERBOSE( device_printf(dev, "Reset controller...\n"); ); hdac_reset(sc, true); /* Initialize the CORB and RIRB */ hdac_corb_init(sc); hdac_rirb_init(sc); HDA_BOOTHVERBOSE( device_printf(dev, "Starting CORB Engine...\n"); ); hdac_corb_start(sc); HDA_BOOTHVERBOSE( device_printf(dev, "Starting RIRB Engine...\n"); ); hdac_rirb_start(sc); /* * Clear HDAC_WAKEEN as at present we have no use for SDI wake * (status change) events. The documentation says that we should * not make any assumptions about the state of this register and * set it explicitly. * Also, clear HDAC_STATESTS. * NB: this needs to be done before the interrupt is enabled as * the handler does not expect this interrupt source. */ HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK); HDA_BOOTHVERBOSE( device_printf(dev, "Enabling controller interrupt...\n"); ); HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | HDAC_GCTL_UNSOL); HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); DELAY(1000); hdac_poll_reinit(sc); hdac_unlock(sc); error = bus_generic_resume(dev); HDA_BOOTHVERBOSE( device_printf(dev, "Resume done\n"); ); return (error); } /**************************************************************************** * int hdac_detach(device_t) * * Detach and free up resources utilized by the hdac device. 
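Stepping back to the polling machinery above: hdac_poll_reinit() sizes its callout from the stream format, decoding SDFMT into a data rate and then polling roughly twice per block. A worked example under assumed values (hz = 100 and blksz = 4096 are arbitrary sample numbers; mdata_rate() mirrors the logic of hdac_mdata_rate()):

#include <stdint.h>
#include <stdio.h>

static const int hz = 100;	/* kernel tick rate, example value */

static int
mdata_rate(uint16_t fmt)
{
	static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 };
	int rate, bits;

	rate = (fmt & (1 << 14)) ? 44100 : 48000;	/* base sample rate */
	rate *= ((fmt >> 11) & 0x07) + 1;	/* rate multiplier */
	rate /= ((fmt >> 8) & 0x07) + 1;	/* rate divisor */
	bits = mbits[(fmt >> 4) & 0x03];	/* container width */
	bits *= (fmt & 0x0f) + 1;		/* channel count */
	return (rate * bits);			/* bits per second */
}

int
main(void)
{
	uint16_t fmt = 0x0011;	/* 48 kHz, 16-bit containers, stereo */
	int blksz = 4096;	/* bytes per block, example value */
	int64_t pollticks;

	/* ticks per block, halved so we look in twice per block */
	pollticks = ((int64_t)hz * blksz) / (mdata_rate(fmt) / 8);
	pollticks >>= 1;
	if (pollticks > hz)
		pollticks = hz;
	if (pollticks < 1)
		pollticks = 1;
	printf("rate=%d bit/s, poll every %lld tick(s)\n",
	    mdata_rate(fmt), (long long)pollticks);
	return (0);
}

With these numbers a 4 KiB block lasts about 21 ms, so the callout fires every tick; slower formats stretch the interval up to the one-second cap.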
****************************************************************************/ static int hdac_detach(device_t dev) { struct hdac_softc *sc = device_get_softc(dev); device_t *devlist; int cad, i, devcount, error; if ((error = device_get_children(dev, &devlist, &devcount)) != 0) return (error); for (i = 0; i < devcount; i++) { cad = (intptr_t)device_get_ivars(devlist[i]); if ((error = device_delete_child(dev, devlist[i])) != 0) { free(devlist, M_TEMP); return (error); } sc->codecs[cad].dev = NULL; } free(devlist, M_TEMP); hdac_lock(sc); hdac_reset(sc, false); hdac_unlock(sc); taskqueue_drain(taskqueue_thread, &sc->unsolq_task); hdac_irq_free(sc); for (i = 0; i < sc->num_ss; i++) hdac_dma_free(sc, &sc->streams[i].bdl); free(sc->streams, M_HDAC); hdac_dma_free(sc, &sc->pos_dma); hdac_dma_free(sc, &sc->rirb_dma); hdac_dma_free(sc, &sc->corb_dma); if (sc->chan_dmat != NULL) { bus_dma_tag_destroy(sc->chan_dmat); sc->chan_dmat = NULL; } hdac_mem_free(sc); snd_mtxfree(sc->lock); return (0); } static bus_dma_tag_t hdac_get_dma_tag(device_t dev, device_t child) { struct hdac_softc *sc = device_get_softc(dev); return (sc->chan_dmat); } static int hdac_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } static int -hdac_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) +hdac_child_location(device_t dev, device_t child, struct sbuf *sb) { - snprintf(buf, buflen, "cad=%d", (int)(intptr_t)device_get_ivars(child)); + sbuf_printf(sb, "cad=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static int -hdac_child_pnpinfo_str_method(device_t dev, device_t child, char *buf, - size_t buflen) +hdac_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb) { struct hdac_softc *sc = device_get_softc(dev); nid_t cad = (uintptr_t)device_get_ivars(child); - snprintf(buf, buflen, + sbuf_printf(sb, "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x", sc->codecs[cad].vendor_id, sc->codecs[cad].device_id, sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id); return (0); } static int hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct hdac_softc *sc = device_get_softc(dev); nid_t cad = (uintptr_t)device_get_ivars(child); switch (which) { case HDA_IVAR_CODEC_ID: *result = cad; break; case HDA_IVAR_VENDOR_ID: *result = sc->codecs[cad].vendor_id; break; case HDA_IVAR_DEVICE_ID: *result = sc->codecs[cad].device_id; break; case HDA_IVAR_REVISION_ID: *result = sc->codecs[cad].revision_id; break; case HDA_IVAR_STEPPING_ID: *result = sc->codecs[cad].stepping_id; break; case HDA_IVAR_SUBVENDOR_ID: *result = pci_get_subvendor(dev); break; case HDA_IVAR_SUBDEVICE_ID: *result = pci_get_subdevice(dev); break; case HDA_IVAR_DMA_NOCACHE: *result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0; break; case HDA_IVAR_STRIPES_MASK: *result = (1 << (1 << sc->num_sdo)) - 1; break; default: return (ENOENT); } return (0); } static struct mtx * hdac_get_mtx(device_t dev, device_t child) { struct hdac_softc *sc = device_get_softc(dev); return (sc->lock); } static uint32_t hdac_codec_command(device_t dev, device_t child, uint32_t verb) { return (hdac_send_command(device_get_softc(dev), (intptr_t)device_get_ivars(child), verb)); } static int hdac_find_stream(struct hdac_softc *sc, int dir, int stream) { int i, ss; ss = -1; /* Allocate ISS/OSS first. 
*/ if (dir == 0) { for (i = 0; i < sc->num_iss; i++) { if (sc->streams[i].stream == stream) { ss = i; break; } } } else { for (i = 0; i < sc->num_oss; i++) { if (sc->streams[i + sc->num_iss].stream == stream) { ss = i + sc->num_iss; break; } } } /* Fallback to BSS. */ if (ss == -1) { for (i = 0; i < sc->num_bss; i++) { if (sc->streams[i + sc->num_iss + sc->num_oss].stream == stream) { ss = i + sc->num_iss + sc->num_oss; break; } } } return (ss); } static int hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe, uint32_t **dmapos) { struct hdac_softc *sc = device_get_softc(dev); nid_t cad = (uintptr_t)device_get_ivars(child); int stream, ss, bw, maxbw, prevbw; /* Look for empty stream. */ ss = hdac_find_stream(sc, dir, 0); /* Return if found nothing. */ if (ss < 0) return (0); /* Check bus bandwidth. */ bw = hdac_bdata_rate(format, dir); if (dir == 1) { bw *= 1 << (sc->num_sdo - stripe); prevbw = sc->sdo_bw_used; maxbw = 48000 * 960 * (1 << sc->num_sdo); } else { prevbw = sc->codecs[cad].sdi_bw_used; maxbw = 48000 * 464; } HDA_BOOTHVERBOSE( device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n", (bw + prevbw) / 1000, maxbw / 1000, bw + prevbw > maxbw ? " -- OVERFLOW!" : ""); ); if (bw + prevbw > maxbw) return (0); if (dir == 1) sc->sdo_bw_used += bw; else sc->codecs[cad].sdi_bw_used += bw; /* Allocate stream number */ if (ss >= sc->num_iss + sc->num_oss) stream = 15 - (ss - sc->num_iss - sc->num_oss); else if (ss >= sc->num_iss) stream = ss - sc->num_iss + 1; else stream = ss + 1; sc->streams[ss].dev = child; sc->streams[ss].dir = dir; sc->streams[ss].stream = stream; sc->streams[ss].bw = bw; sc->streams[ss].format = format; sc->streams[ss].stripe = stripe; if (dmapos != NULL) { if (sc->pos_dma.dma_vaddr != NULL) *dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8); else *dmapos = NULL; } return (stream); } static void hdac_stream_free(device_t dev, device_t child, int dir, int stream) { struct hdac_softc *sc = device_get_softc(dev); nid_t cad = (uintptr_t)device_get_ivars(child); int ss; ss = hdac_find_stream(sc, dir, stream); KASSERT(ss >= 0, ("Free for not allocated stream (%d/%d)\n", dir, stream)); if (dir == 1) sc->sdo_bw_used -= sc->streams[ss].bw; else sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw; sc->streams[ss].stream = 0; sc->streams[ss].dev = NULL; } static int hdac_stream_start(device_t dev, device_t child, int dir, int stream, bus_addr_t buf, int blksz, int blkcnt) { struct hdac_softc *sc = device_get_softc(dev); struct hdac_bdle *bdle; uint64_t addr; int i, ss, off; uint32_t ctl; ss = hdac_find_stream(sc, dir, stream); KASSERT(ss >= 0, ("Start for not allocated stream (%d/%d)\n", dir, stream)); addr = (uint64_t)buf; bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr; for (i = 0; i < blkcnt; i++, bdle++) { bdle->addrl = htole32((uint32_t)addr); bdle->addrh = htole32((uint32_t)(addr >> 32)); bdle->len = htole32(blksz); bdle->ioc = htole32(1); addr += blksz; } bus_dmamap_sync(sc->streams[ss].bdl.dma_tag, sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE); off = ss << 5; HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt); HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1); addr = sc->streams[ss].bdl.dma_paddr; HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr); HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32)); ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2); if (dir) ctl |= HDAC_SDCTL2_DIR; else ctl &= ~HDAC_SDCTL2_DIR; ctl &= ~HDAC_SDCTL2_STRM_MASK; ctl |= stream << HDAC_SDCTL2_STRM_SHIFT; ctl &= 
~HDAC_SDCTL2_STRIPE_MASK; ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT; HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl); HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format); ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); ctl |= 1 << ss; HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS, HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE | HDAC_SDCTL_RUN; HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); sc->streams[ss].blksz = blksz; sc->streams[ss].running = 1; hdac_poll_reinit(sc); return (0); } static void hdac_stream_stop(device_t dev, device_t child, int dir, int stream) { struct hdac_softc *sc = device_get_softc(dev); int ss, off; uint32_t ctl; ss = hdac_find_stream(sc, dir, stream); KASSERT(ss >= 0, ("Stop for not allocated stream (%d/%d)\n", dir, stream)); bus_dmamap_sync(sc->streams[ss].bdl.dma_tag, sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE); off = ss << 5; ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE | HDAC_SDCTL_RUN); HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); ctl &= ~(1 << ss); HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); sc->streams[ss].running = 0; hdac_poll_reinit(sc); } static void hdac_stream_reset(device_t dev, device_t child, int dir, int stream) { struct hdac_softc *sc = device_get_softc(dev); int timeout = 1000; int to = timeout; int ss, off; uint32_t ctl; ss = hdac_find_stream(sc, dir, stream); KASSERT(ss >= 0, ("Reset for not allocated stream (%d/%d)\n", dir, stream)); off = ss << 5; ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); ctl |= HDAC_SDCTL_SRST; HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); do { ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); if (ctl & HDAC_SDCTL_SRST) break; DELAY(10); } while (--to); if (!(ctl & HDAC_SDCTL_SRST)) device_printf(dev, "Reset setting timeout\n"); ctl &= ~HDAC_SDCTL_SRST; HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl); to = timeout; do { ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0); if (!(ctl & HDAC_SDCTL_SRST)) break; DELAY(10); } while (--to); if (ctl & HDAC_SDCTL_SRST) device_printf(dev, "Reset timeout!\n"); } static uint32_t hdac_stream_getptr(device_t dev, device_t child, int dir, int stream) { struct hdac_softc *sc = device_get_softc(dev); int ss, off; ss = hdac_find_stream(sc, dir, stream); KASSERT(ss >= 0, ("Reset for not allocated stream (%d/%d)\n", dir, stream)); off = ss << 5; return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB)); } static int hdac_unsol_alloc(device_t dev, device_t child, int tag) { struct hdac_softc *sc = device_get_softc(dev); sc->unsol_registered++; hdac_poll_reinit(sc); return (tag); } static void hdac_unsol_free(device_t dev, device_t child, int tag) { struct hdac_softc *sc = device_get_softc(dev); sc->unsol_registered--; hdac_poll_reinit(sc); } static device_method_t hdac_methods[] = { /* device interface */ DEVMETHOD(device_probe, hdac_probe), DEVMETHOD(device_attach, hdac_attach), DEVMETHOD(device_detach, hdac_detach), DEVMETHOD(device_suspend, hdac_suspend), DEVMETHOD(device_resume, hdac_resume), /* Bus interface */ DEVMETHOD(bus_get_dma_tag, hdac_get_dma_tag), DEVMETHOD(bus_print_child, hdac_print_child), - DEVMETHOD(bus_child_location_str, hdac_child_location_str), - DEVMETHOD(bus_child_pnpinfo_str, hdac_child_pnpinfo_str_method), + DEVMETHOD(bus_child_location, hdac_child_location), + DEVMETHOD(bus_child_pnpinfo, 
hdac_child_pnpinfo_method), DEVMETHOD(bus_read_ivar, hdac_read_ivar), DEVMETHOD(hdac_get_mtx, hdac_get_mtx), DEVMETHOD(hdac_codec_command, hdac_codec_command), DEVMETHOD(hdac_stream_alloc, hdac_stream_alloc), DEVMETHOD(hdac_stream_free, hdac_stream_free), DEVMETHOD(hdac_stream_start, hdac_stream_start), DEVMETHOD(hdac_stream_stop, hdac_stream_stop), DEVMETHOD(hdac_stream_reset, hdac_stream_reset), DEVMETHOD(hdac_stream_getptr, hdac_stream_getptr), DEVMETHOD(hdac_unsol_alloc, hdac_unsol_alloc), DEVMETHOD(hdac_unsol_free, hdac_unsol_free), DEVMETHOD_END }; static driver_t hdac_driver = { "hdac", hdac_methods, sizeof(struct hdac_softc), }; static devclass_t hdac_devclass; DRIVER_MODULE(snd_hda, pci, hdac_driver, hdac_devclass, NULL, NULL); diff --git a/sys/dev/sound/pci/hda/hdacc.c b/sys/dev/sound/pci/hda/hdacc.c index 3b1ca7ea61e5..ff72498ecb9f 100644 --- a/sys/dev/sound/pci/hda/hdacc.c +++ b/sys/dev/sound/pci/hda/hdacc.c @@ -1,798 +1,797 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Stephane E. Potvin * Copyright (c) 2006 Ariff Abdullah * Copyright (c) 2008-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Intel High Definition Audio (CODEC) driver for FreeBSD. 
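The hunks above are the heart of this commit: the old bus_child_location_str/bus_child_pnpinfo_str methods wrote into a fixed caller-supplied buffer, while the new bus_child_location/bus_child_pnpinfo append to an sbuf, so handlers no longer need any truncation logic. A sketch of the new-style methods for a hypothetical bus (the "foo" names and the bare slot-number ivar are invented for illustration):

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/sbuf.h>

/* Hypothetical bus "foo" whose per-child ivar is a slot number. */
static int
foo_child_location(device_t bus, device_t child, struct sbuf *sb)
{
	sbuf_printf(sb, "slot=%d", (int)(intptr_t)device_get_ivars(child));
	return (0);
}

static int
foo_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb)
{
	/* Space-separated key=value pairs; values here are made up. */
	sbuf_printf(sb, "vendor=0x%04x device=0x%04x", 0x1234, 0x5678);
	return (0);
}

/*
 * Registered exactly like the hdac/hdacc conversions above:
 *
 *	DEVMETHOD(bus_child_location, foo_child_location),
 *	DEVMETHOD(bus_child_pnpinfo,  foo_child_pnpinfo),
 */

Handlers keep the zero-on-success return convention of the old interface; only the output mechanism changes.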
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include <dev/sound/pcm/sound.h> #include <sys/ctype.h> #include <dev/sound/pci/hda/hda_reg.h> #include <dev/sound/pci/hda/hdac.h> SND_DECLARE_FILE("$FreeBSD$"); struct hdacc_fg { device_t dev; nid_t nid; uint8_t type; uint32_t subsystem_id; }; struct hdacc_softc { device_t dev; struct mtx *lock; nid_t cad; device_t streams[2][16]; device_t tags[64]; int fgcnt; struct hdacc_fg *fgs; }; #define hdacc_lock(codec) snd_mtxlock((codec)->lock) #define hdacc_unlock(codec) snd_mtxunlock((codec)->lock) #define hdacc_lockassert(codec) snd_mtxassert((codec)->lock) MALLOC_DEFINE(M_HDACC, "hdacc", "HDA CODEC"); /* CODECs */ static const struct { uint32_t id; uint16_t revid; const char *name; } hdacc_codecs[] = { { HDA_CODEC_CS4206, 0, "Cirrus Logic CS4206" }, { HDA_CODEC_CS4207, 0, "Cirrus Logic CS4207" }, { HDA_CODEC_CS4210, 0, "Cirrus Logic CS4210" }, { HDA_CODEC_ALC215, 0, "Realtek ALC215" }, { HDA_CODEC_ALC221, 0, "Realtek ALC221" }, { HDA_CODEC_ALC222, 0, "Realtek ALC222" }, { HDA_CODEC_ALC225, 0, "Realtek ALC225" }, { HDA_CODEC_ALC231, 0, "Realtek ALC231" }, { HDA_CODEC_ALC233, 0, "Realtek ALC233" }, { HDA_CODEC_ALC234, 0, "Realtek ALC234" }, { HDA_CODEC_ALC235, 0, "Realtek ALC235" }, { HDA_CODEC_ALC236, 0, "Realtek ALC236" }, { HDA_CODEC_ALC245, 0, "Realtek ALC245" }, { HDA_CODEC_ALC255, 0, "Realtek ALC255" }, { HDA_CODEC_ALC256, 0, "Realtek ALC256" }, { HDA_CODEC_ALC257, 0, "Realtek ALC257" }, { HDA_CODEC_ALC260, 0, "Realtek ALC260" }, { HDA_CODEC_ALC262, 0, "Realtek ALC262" }, { HDA_CODEC_ALC267, 0, "Realtek ALC267" }, { HDA_CODEC_ALC268, 0, "Realtek ALC268" }, { HDA_CODEC_ALC269, 0, "Realtek ALC269" }, { HDA_CODEC_ALC270, 0, "Realtek ALC270" }, { HDA_CODEC_ALC272, 0, "Realtek ALC272" }, { HDA_CODEC_ALC273, 0, "Realtek ALC273" }, { HDA_CODEC_ALC274, 0, "Realtek ALC274" }, { HDA_CODEC_ALC275, 0, "Realtek ALC275" }, { HDA_CODEC_ALC276, 0, "Realtek ALC276" }, { HDA_CODEC_ALC292, 0, "Realtek ALC292" }, { HDA_CODEC_ALC295, 0, "Realtek ALC295" }, { HDA_CODEC_ALC280, 0, "Realtek ALC280" }, { HDA_CODEC_ALC282, 0, "Realtek ALC282" }, { HDA_CODEC_ALC283, 0, "Realtek ALC283" }, { HDA_CODEC_ALC284, 0, "Realtek ALC284" }, { HDA_CODEC_ALC285, 0, "Realtek ALC285" }, { HDA_CODEC_ALC286, 0, "Realtek ALC286" }, { HDA_CODEC_ALC288, 0, "Realtek ALC288" }, { HDA_CODEC_ALC289, 0, "Realtek ALC289" }, { HDA_CODEC_ALC290, 0, "Realtek ALC290" }, { HDA_CODEC_ALC292, 0, "Realtek ALC292" }, { HDA_CODEC_ALC293, 0, "Realtek ALC293" }, { HDA_CODEC_ALC294, 0, "Realtek ALC294" }, { HDA_CODEC_ALC295, 0, "Realtek ALC295" }, { HDA_CODEC_ALC298, 0, "Realtek ALC298" }, { HDA_CODEC_ALC299, 0, "Realtek ALC299" }, { HDA_CODEC_ALC300, 0, "Realtek ALC300" }, { HDA_CODEC_ALC623, 0, "Realtek ALC623" }, { HDA_CODEC_ALC660, 0, "Realtek ALC660-VD" }, { HDA_CODEC_ALC662, 0x0002, "Realtek ALC662 rev2" }, { HDA_CODEC_ALC662, 0x0101, "Realtek ALC662 rev1" }, { HDA_CODEC_ALC662, 0x0300, "Realtek ALC662 rev3" }, { HDA_CODEC_ALC662, 0, "Realtek ALC662" }, { HDA_CODEC_ALC663, 0, "Realtek ALC663" }, { HDA_CODEC_ALC665, 0, "Realtek ALC665" }, { HDA_CODEC_ALC670, 0, "Realtek ALC670" }, { HDA_CODEC_ALC671, 0, "Realtek ALC671" }, { HDA_CODEC_ALC680, 0, "Realtek ALC680" }, { HDA_CODEC_ALC700, 0, "Realtek ALC700" }, { HDA_CODEC_ALC701, 0, "Realtek ALC701" }, { HDA_CODEC_ALC703, 0, "Realtek ALC703" }, { HDA_CODEC_ALC861, 0x0340, "Realtek ALC660" }, { HDA_CODEC_ALC861, 0, "Realtek ALC861" }, { HDA_CODEC_ALC861VD, 0, "Realtek ALC861-VD" }, { HDA_CODEC_ALC880, 0, "Realtek ALC880" }, { HDA_CODEC_ALC882, 0, "Realtek ALC882" }, { HDA_CODEC_ALC883, 0, "Realtek ALC883" }, { HDA_CODEC_ALC885,
0x0101, "Realtek ALC889A" }, { HDA_CODEC_ALC885, 0x0103, "Realtek ALC889A" }, { HDA_CODEC_ALC885, 0, "Realtek ALC885" }, { HDA_CODEC_ALC887, 0, "Realtek ALC887" }, { HDA_CODEC_ALC888, 0x0101, "Realtek ALC1200" }, { HDA_CODEC_ALC888, 0, "Realtek ALC888" }, { HDA_CODEC_ALC889, 0, "Realtek ALC889" }, { HDA_CODEC_ALC892, 0, "Realtek ALC892" }, { HDA_CODEC_ALC899, 0, "Realtek ALC899" }, { HDA_CODEC_ALC1150, 0, "Realtek ALC1150" }, { HDA_CODEC_ALCS1200A, 0, "Realtek ALCS1200A" }, { HDA_CODEC_ALC1220_1, 0, "Realtek ALC1220" }, { HDA_CODEC_ALC1220, 0, "Realtek ALC1220" }, { HDA_CODEC_AD1882, 0, "Analog Devices AD1882" }, { HDA_CODEC_AD1882A, 0, "Analog Devices AD1882A" }, { HDA_CODEC_AD1883, 0, "Analog Devices AD1883" }, { HDA_CODEC_AD1884, 0, "Analog Devices AD1884" }, { HDA_CODEC_AD1884A, 0, "Analog Devices AD1884A" }, { HDA_CODEC_AD1981HD, 0, "Analog Devices AD1981HD" }, { HDA_CODEC_AD1983, 0, "Analog Devices AD1983" }, { HDA_CODEC_AD1984, 0, "Analog Devices AD1984" }, { HDA_CODEC_AD1984A, 0, "Analog Devices AD1984A" }, { HDA_CODEC_AD1984B, 0, "Analog Devices AD1984B" }, { HDA_CODEC_AD1986A, 0, "Analog Devices AD1986A" }, { HDA_CODEC_AD1987, 0, "Analog Devices AD1987" }, { HDA_CODEC_AD1988, 0, "Analog Devices AD1988A" }, { HDA_CODEC_AD1988B, 0, "Analog Devices AD1988B" }, { HDA_CODEC_AD1989A, 0, "Analog Devices AD1989A" }, { HDA_CODEC_AD1989B, 0, "Analog Devices AD1989B" }, { HDA_CODEC_CA0110, 0, "Creative CA0110-IBG" }, { HDA_CODEC_CA0110_2, 0, "Creative CA0110-IBG" }, { HDA_CODEC_CA0132, 0, "Creative CA0132" }, { HDA_CODEC_SB0880, 0, "Creative SB0880 X-Fi" }, { HDA_CODEC_CMI9880, 0, "CMedia CMI9880" }, { HDA_CODEC_CMI98802, 0, "CMedia CMI9880" }, { HDA_CODEC_CXD9872RDK, 0, "Sigmatel CXD9872RD/K" }, { HDA_CODEC_CXD9872AKD, 0, "Sigmatel CXD9872AKD" }, { HDA_CODEC_STAC9200D, 0, "Sigmatel STAC9200D" }, { HDA_CODEC_STAC9204X, 0, "Sigmatel STAC9204X" }, { HDA_CODEC_STAC9204D, 0, "Sigmatel STAC9204D" }, { HDA_CODEC_STAC9205X, 0, "Sigmatel STAC9205X" }, { HDA_CODEC_STAC9205D, 0, "Sigmatel STAC9205D" }, { HDA_CODEC_STAC9220, 0, "Sigmatel STAC9220" }, { HDA_CODEC_STAC9220_A1, 0, "Sigmatel STAC9220_A1" }, { HDA_CODEC_STAC9220_A2, 0, "Sigmatel STAC9220_A2" }, { HDA_CODEC_STAC9221, 0, "Sigmatel STAC9221" }, { HDA_CODEC_STAC9221_A2, 0, "Sigmatel STAC9221_A2" }, { HDA_CODEC_STAC9221D, 0, "Sigmatel STAC9221D" }, { HDA_CODEC_STAC922XD, 0, "Sigmatel STAC9220D/9223D" }, { HDA_CODEC_STAC9227X, 0, "Sigmatel STAC9227X" }, { HDA_CODEC_STAC9227D, 0, "Sigmatel STAC9227D" }, { HDA_CODEC_STAC9228X, 0, "Sigmatel STAC9228X" }, { HDA_CODEC_STAC9228D, 0, "Sigmatel STAC9228D" }, { HDA_CODEC_STAC9229X, 0, "Sigmatel STAC9229X" }, { HDA_CODEC_STAC9229D, 0, "Sigmatel STAC9229D" }, { HDA_CODEC_STAC9230X, 0, "Sigmatel STAC9230X" }, { HDA_CODEC_STAC9230D, 0, "Sigmatel STAC9230D" }, { HDA_CODEC_STAC9250, 0, "Sigmatel STAC9250" }, { HDA_CODEC_STAC9251, 0, "Sigmatel STAC9251" }, { HDA_CODEC_STAC9255, 0, "Sigmatel STAC9255" }, { HDA_CODEC_STAC9255D, 0, "Sigmatel STAC9255D" }, { HDA_CODEC_STAC9254, 0, "Sigmatel STAC9254" }, { HDA_CODEC_STAC9254D, 0, "Sigmatel STAC9254D" }, { HDA_CODEC_STAC9271X, 0, "Sigmatel STAC9271X" }, { HDA_CODEC_STAC9271D, 0, "Sigmatel STAC9271D" }, { HDA_CODEC_STAC9272X, 0, "Sigmatel STAC9272X" }, { HDA_CODEC_STAC9272D, 0, "Sigmatel STAC9272D" }, { HDA_CODEC_STAC9273X, 0, "Sigmatel STAC9273X" }, { HDA_CODEC_STAC9273D, 0, "Sigmatel STAC9273D" }, { HDA_CODEC_STAC9274, 0, "Sigmatel STAC9274" }, { HDA_CODEC_STAC9274D, 0, "Sigmatel STAC9274D" }, { HDA_CODEC_STAC9274X5NH, 0, "Sigmatel STAC9274X5NH" }, { 
HDA_CODEC_STAC9274D5NH, 0, "Sigmatel STAC9274D5NH" }, { HDA_CODEC_STAC9872AK, 0, "Sigmatel STAC9872AK" }, { HDA_CODEC_IDT92HD005, 0, "IDT 92HD005" }, { HDA_CODEC_IDT92HD005D, 0, "IDT 92HD005D" }, { HDA_CODEC_IDT92HD206X, 0, "IDT 92HD206X" }, { HDA_CODEC_IDT92HD206D, 0, "IDT 92HD206D" }, { HDA_CODEC_IDT92HD66B1X5, 0, "IDT 92HD66B1X5" }, { HDA_CODEC_IDT92HD66B2X5, 0, "IDT 92HD66B2X5" }, { HDA_CODEC_IDT92HD66B3X5, 0, "IDT 92HD66B3X5" }, { HDA_CODEC_IDT92HD66C1X5, 0, "IDT 92HD66C1X5" }, { HDA_CODEC_IDT92HD66C2X5, 0, "IDT 92HD66C2X5" }, { HDA_CODEC_IDT92HD66C3X5, 0, "IDT 92HD66C3X5" }, { HDA_CODEC_IDT92HD66B1X3, 0, "IDT 92HD66B1X3" }, { HDA_CODEC_IDT92HD66B2X3, 0, "IDT 92HD66B2X3" }, { HDA_CODEC_IDT92HD66B3X3, 0, "IDT 92HD66B3X3" }, { HDA_CODEC_IDT92HD66C1X3, 0, "IDT 92HD66C1X3" }, { HDA_CODEC_IDT92HD66C2X3, 0, "IDT 92HD66C2X3" }, { HDA_CODEC_IDT92HD66C3_65, 0, "IDT 92HD66C3_65" }, { HDA_CODEC_IDT92HD700X, 0, "IDT 92HD700X" }, { HDA_CODEC_IDT92HD700D, 0, "IDT 92HD700D" }, { HDA_CODEC_IDT92HD71B5, 0, "IDT 92HD71B5" }, { HDA_CODEC_IDT92HD71B5_2, 0, "IDT 92HD71B5" }, { HDA_CODEC_IDT92HD71B6, 0, "IDT 92HD71B6" }, { HDA_CODEC_IDT92HD71B6_2, 0, "IDT 92HD71B6" }, { HDA_CODEC_IDT92HD71B7, 0, "IDT 92HD71B7" }, { HDA_CODEC_IDT92HD71B7_2, 0, "IDT 92HD71B7" }, { HDA_CODEC_IDT92HD71B8, 0, "IDT 92HD71B8" }, { HDA_CODEC_IDT92HD71B8_2, 0, "IDT 92HD71B8" }, { HDA_CODEC_IDT92HD73C1, 0, "IDT 92HD73C1" }, { HDA_CODEC_IDT92HD73D1, 0, "IDT 92HD73D1" }, { HDA_CODEC_IDT92HD73E1, 0, "IDT 92HD73E1" }, { HDA_CODEC_IDT92HD75B3, 0, "IDT 92HD75B3" }, { HDA_CODEC_IDT92HD75BX, 0, "IDT 92HD75BX" }, { HDA_CODEC_IDT92HD81B1C, 0, "IDT 92HD81B1C" }, { HDA_CODEC_IDT92HD81B1X, 0, "IDT 92HD81B1X" }, { HDA_CODEC_IDT92HD83C1C, 0, "IDT 92HD83C1C" }, { HDA_CODEC_IDT92HD83C1X, 0, "IDT 92HD83C1X" }, { HDA_CODEC_IDT92HD87B1_3, 0, "IDT 92HD87B1/3" }, { HDA_CODEC_IDT92HD87B2_4, 0, "IDT 92HD87B2/4" }, { HDA_CODEC_IDT92HD89C3, 0, "IDT 92HD89C3" }, { HDA_CODEC_IDT92HD89C2, 0, "IDT 92HD89C2" }, { HDA_CODEC_IDT92HD89C1, 0, "IDT 92HD89C1" }, { HDA_CODEC_IDT92HD89B3, 0, "IDT 92HD89B3" }, { HDA_CODEC_IDT92HD89B2, 0, "IDT 92HD89B2" }, { HDA_CODEC_IDT92HD89B1, 0, "IDT 92HD89B1" }, { HDA_CODEC_IDT92HD89E3, 0, "IDT 92HD89E3" }, { HDA_CODEC_IDT92HD89E2, 0, "IDT 92HD89E2" }, { HDA_CODEC_IDT92HD89E1, 0, "IDT 92HD89E1" }, { HDA_CODEC_IDT92HD89D3, 0, "IDT 92HD89D3" }, { HDA_CODEC_IDT92HD89D2, 0, "IDT 92HD89D2" }, { HDA_CODEC_IDT92HD89D1, 0, "IDT 92HD89D1" }, { HDA_CODEC_IDT92HD89F3, 0, "IDT 92HD89F3" }, { HDA_CODEC_IDT92HD89F2, 0, "IDT 92HD89F2" }, { HDA_CODEC_IDT92HD89F1, 0, "IDT 92HD89F1" }, { HDA_CODEC_IDT92HD90BXX, 0, "IDT 92HD90BXX" }, { HDA_CODEC_IDT92HD91BXX, 0, "IDT 92HD91BXX" }, { HDA_CODEC_IDT92HD93BXX, 0, "IDT 92HD93BXX" }, { HDA_CODEC_IDT92HD98BXX, 0, "IDT 92HD98BXX" }, { HDA_CODEC_IDT92HD99BXX, 0, "IDT 92HD99BXX" }, { HDA_CODEC_CX20549, 0, "Conexant CX20549 (Venice)" }, { HDA_CODEC_CX20551, 0, "Conexant CX20551 (Waikiki)" }, { HDA_CODEC_CX20561, 0, "Conexant CX20561 (Hermosa)" }, { HDA_CODEC_CX20582, 0, "Conexant CX20582 (Pebble)" }, { HDA_CODEC_CX20583, 0, "Conexant CX20583 (Pebble HSF)" }, { HDA_CODEC_CX20584, 0, "Conexant CX20584" }, { HDA_CODEC_CX20585, 0, "Conexant CX20585" }, { HDA_CODEC_CX20588, 0, "Conexant CX20588" }, { HDA_CODEC_CX20590, 0, "Conexant CX20590" }, { HDA_CODEC_CX20631, 0, "Conexant CX20631" }, { HDA_CODEC_CX20632, 0, "Conexant CX20632" }, { HDA_CODEC_CX20641, 0, "Conexant CX20641" }, { HDA_CODEC_CX20642, 0, "Conexant CX20642" }, { HDA_CODEC_CX20651, 0, "Conexant CX20651" }, { HDA_CODEC_CX20652, 0, "Conexant CX20652" }, { 
HDA_CODEC_CX20664, 0, "Conexant CX20664" }, { HDA_CODEC_CX20665, 0, "Conexant CX20665" }, { HDA_CODEC_CX21722, 0, "Conexant CX21722" }, { HDA_CODEC_CX20722, 0, "Conexant CX20722" }, { HDA_CODEC_CX21724, 0, "Conexant CX21724" }, { HDA_CODEC_CX20724, 0, "Conexant CX20724" }, { HDA_CODEC_CX20751, 0, "Conexant CX20751/2" }, { HDA_CODEC_CX20751_2, 0, "Conexant CX20751/2" }, { HDA_CODEC_CX20753, 0, "Conexant CX20753/4" }, { HDA_CODEC_CX20755, 0, "Conexant CX20755" }, { HDA_CODEC_CX20756, 0, "Conexant CX20756" }, { HDA_CODEC_CX20757, 0, "Conexant CX20757" }, { HDA_CODEC_CX20952, 0, "Conexant CX20952" }, { HDA_CODEC_VT1708_8, 0, "VIA VT1708_8" }, { HDA_CODEC_VT1708_9, 0, "VIA VT1708_9" }, { HDA_CODEC_VT1708_A, 0, "VIA VT1708_A" }, { HDA_CODEC_VT1708_B, 0, "VIA VT1708_B" }, { HDA_CODEC_VT1709_0, 0, "VIA VT1709_0" }, { HDA_CODEC_VT1709_1, 0, "VIA VT1709_1" }, { HDA_CODEC_VT1709_2, 0, "VIA VT1709_2" }, { HDA_CODEC_VT1709_3, 0, "VIA VT1709_3" }, { HDA_CODEC_VT1709_4, 0, "VIA VT1709_4" }, { HDA_CODEC_VT1709_5, 0, "VIA VT1709_5" }, { HDA_CODEC_VT1709_6, 0, "VIA VT1709_6" }, { HDA_CODEC_VT1709_7, 0, "VIA VT1709_7" }, { HDA_CODEC_VT1708B_0, 0, "VIA VT1708B_0" }, { HDA_CODEC_VT1708B_1, 0, "VIA VT1708B_1" }, { HDA_CODEC_VT1708B_2, 0, "VIA VT1708B_2" }, { HDA_CODEC_VT1708B_3, 0, "VIA VT1708B_3" }, { HDA_CODEC_VT1708B_4, 0, "VIA VT1708B_4" }, { HDA_CODEC_VT1708B_5, 0, "VIA VT1708B_5" }, { HDA_CODEC_VT1708B_6, 0, "VIA VT1708B_6" }, { HDA_CODEC_VT1708B_7, 0, "VIA VT1708B_7" }, { HDA_CODEC_VT1708S_0, 0, "VIA VT1708S_0" }, { HDA_CODEC_VT1708S_1, 0, "VIA VT1708S_1" }, { HDA_CODEC_VT1708S_2, 0, "VIA VT1708S_2" }, { HDA_CODEC_VT1708S_3, 0, "VIA VT1708S_3" }, { HDA_CODEC_VT1708S_4, 0, "VIA VT1708S_4" }, { HDA_CODEC_VT1708S_5, 0, "VIA VT1708S_5" }, { HDA_CODEC_VT1708S_6, 0, "VIA VT1708S_6" }, { HDA_CODEC_VT1708S_7, 0, "VIA VT1708S_7" }, { HDA_CODEC_VT1702_0, 0, "VIA VT1702_0" }, { HDA_CODEC_VT1702_1, 0, "VIA VT1702_1" }, { HDA_CODEC_VT1702_2, 0, "VIA VT1702_2" }, { HDA_CODEC_VT1702_3, 0, "VIA VT1702_3" }, { HDA_CODEC_VT1702_4, 0, "VIA VT1702_4" }, { HDA_CODEC_VT1702_5, 0, "VIA VT1702_5" }, { HDA_CODEC_VT1702_6, 0, "VIA VT1702_6" }, { HDA_CODEC_VT1702_7, 0, "VIA VT1702_7" }, { HDA_CODEC_VT1716S_0, 0, "VIA VT1716S_0" }, { HDA_CODEC_VT1716S_1, 0, "VIA VT1716S_1" }, { HDA_CODEC_VT1718S_0, 0, "VIA VT1718S_0" }, { HDA_CODEC_VT1718S_1, 0, "VIA VT1718S_1" }, { HDA_CODEC_VT1802_0, 0, "VIA VT1802_0" }, { HDA_CODEC_VT1802_1, 0, "VIA VT1802_1" }, { HDA_CODEC_VT1812, 0, "VIA VT1812" }, { HDA_CODEC_VT1818S, 0, "VIA VT1818S" }, { HDA_CODEC_VT1828S, 0, "VIA VT1828S" }, { HDA_CODEC_VT2002P_0, 0, "VIA VT2002P_0" }, { HDA_CODEC_VT2002P_1, 0, "VIA VT2002P_1" }, { HDA_CODEC_VT2020, 0, "VIA VT2020" }, { HDA_CODEC_ATIRS600_1, 0, "ATI RS600" }, { HDA_CODEC_ATIRS600_2, 0, "ATI RS600" }, { HDA_CODEC_ATIRS690, 0, "ATI RS690/780" }, { HDA_CODEC_ATIR6XX, 0, "ATI R6xx" }, { HDA_CODEC_NVIDIAMCP67, 0, "NVIDIA MCP67" }, { HDA_CODEC_NVIDIAMCP73, 0, "NVIDIA MCP73" }, { HDA_CODEC_NVIDIAMCP78, 0, "NVIDIA MCP78" }, { HDA_CODEC_NVIDIAMCP78_2, 0, "NVIDIA MCP78" }, { HDA_CODEC_NVIDIAMCP78_3, 0, "NVIDIA MCP78" }, { HDA_CODEC_NVIDIAMCP78_4, 0, "NVIDIA MCP78" }, { HDA_CODEC_NVIDIAMCP7A, 0, "NVIDIA MCP7A" }, { HDA_CODEC_NVIDIAGT220, 0, "NVIDIA GT220" }, { HDA_CODEC_NVIDIAGT21X, 0, "NVIDIA GT21x" }, { HDA_CODEC_NVIDIAMCP89, 0, "NVIDIA MCP89" }, { HDA_CODEC_NVIDIAGT240, 0, "NVIDIA GT240" }, { HDA_CODEC_NVIDIAGTS450, 0, "NVIDIA GTS450" }, { HDA_CODEC_NVIDIAGT440, 0, "NVIDIA GT440" }, { HDA_CODEC_NVIDIAGTX550, 0, "NVIDIA GTX550" }, { HDA_CODEC_NVIDIAGTX570, 0, 
"NVIDIA GTX570" }, { HDA_CODEC_NVIDIATEGRA30, 0, "NVIDIA Tegra30" }, { HDA_CODEC_NVIDIATEGRA114, 0, "NVIDIA Tegra114" }, { HDA_CODEC_NVIDIATEGRA124, 0, "NVIDIA Tegra124" }, { HDA_CODEC_NVIDIATEGRA210, 0, "NVIDIA Tegra210" }, { HDA_CODEC_INTELIP, 0, "Intel Ibex Peak" }, { HDA_CODEC_INTELBL, 0, "Intel Bearlake" }, { HDA_CODEC_INTELCA, 0, "Intel Cantiga" }, { HDA_CODEC_INTELEL, 0, "Intel Eaglelake" }, { HDA_CODEC_INTELIP2, 0, "Intel Ibex Peak" }, { HDA_CODEC_INTELCPT, 0, "Intel Cougar Point" }, { HDA_CODEC_INTELPPT, 0, "Intel Panther Point" }, { HDA_CODEC_INTELHSW, 0, "Intel Haswell" }, { HDA_CODEC_INTELBDW, 0, "Intel Broadwell" }, { HDA_CODEC_INTELSKLK, 0, "Intel Skylake" }, { HDA_CODEC_INTELKBLK, 0, "Intel Kaby Lake" }, { HDA_CODEC_INTELJLK, 0, "Intel Jasper Lake" }, { HDA_CODEC_INTELELLK, 0, "Intel Elkhart Lake" }, { HDA_CODEC_INTELCT, 0, "Intel Cedar Trail" }, { HDA_CODEC_INTELVV2, 0, "Intel Valleyview2" }, { HDA_CODEC_INTELBR, 0, "Intel Braswell" }, { HDA_CODEC_INTELCL, 0, "Intel Crestline" }, { HDA_CODEC_INTELBXTN, 0, "Intel Broxton" }, { HDA_CODEC_INTELCNLK, 0, "Intel Cannon Lake" }, { HDA_CODEC_INTELGMLK, 0, "Intel Gemini Lake" }, { HDA_CODEC_INTELGMLK1, 0, "Intel Gemini Lake" }, { HDA_CODEC_INTELICLK, 0, "Intel Ice Lake" }, { HDA_CODEC_INTELTGLK, 0, "Intel Tiger Lake" }, { HDA_CODEC_SII1390, 0, "Silicon Image SiI1390" }, { HDA_CODEC_SII1392, 0, "Silicon Image SiI1392" }, /* Unknown CODECs */ { HDA_CODEC_ADXXXX, 0, "Analog Devices" }, { HDA_CODEC_AGEREXXXX, 0, "Lucent/Agere Systems" }, { HDA_CODEC_ALCXXXX, 0, "Realtek" }, { HDA_CODEC_ATIXXXX, 0, "ATI" }, { HDA_CODEC_CAXXXX, 0, "Creative" }, { HDA_CODEC_CMIXXXX, 0, "CMedia" }, { HDA_CODEC_CMIXXXX2, 0, "CMedia" }, { HDA_CODEC_CSXXXX, 0, "Cirrus Logic" }, { HDA_CODEC_CXXXXX, 0, "Conexant" }, { HDA_CODEC_CHXXXX, 0, "Chrontel" }, { HDA_CODEC_IDTXXXX, 0, "IDT" }, { HDA_CODEC_INTELXXXX, 0, "Intel" }, { HDA_CODEC_MOTOXXXX, 0, "Motorola" }, { HDA_CODEC_NVIDIAXXXX, 0, "NVIDIA" }, { HDA_CODEC_SIIXXXX, 0, "Silicon Image" }, { HDA_CODEC_STACXXXX, 0, "Sigmatel" }, { HDA_CODEC_VTXXXX, 0, "VIA" }, }; static int hdacc_suspend(device_t dev) { HDA_BOOTHVERBOSE( device_printf(dev, "Suspend...\n"); ); bus_generic_suspend(dev); HDA_BOOTHVERBOSE( device_printf(dev, "Suspend done\n"); ); return (0); } static int hdacc_resume(device_t dev) { HDA_BOOTHVERBOSE( device_printf(dev, "Resume...\n"); ); bus_generic_resume(dev); HDA_BOOTHVERBOSE( device_printf(dev, "Resume done\n"); ); return (0); } static int hdacc_probe(device_t dev) { uint32_t id, revid; char buf[128]; int i; id = ((uint32_t)hda_get_vendor_id(dev) << 16) + hda_get_device_id(dev); revid = ((uint32_t)hda_get_revision_id(dev) << 8) + hda_get_stepping_id(dev); for (i = 0; i < nitems(hdacc_codecs); i++) { if (!HDA_DEV_MATCH(hdacc_codecs[i].id, id)) continue; if (hdacc_codecs[i].revid != 0 && hdacc_codecs[i].revid != revid) continue; break; } if (i < nitems(hdacc_codecs)) { if ((hdacc_codecs[i].id & 0xffff) != 0xffff) strlcpy(buf, hdacc_codecs[i].name, sizeof(buf)); else snprintf(buf, sizeof(buf), "%s (0x%04x)", hdacc_codecs[i].name, hda_get_device_id(dev)); } else snprintf(buf, sizeof(buf), "Generic (0x%04x)", id); strlcat(buf, " HDA CODEC", sizeof(buf)); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } static int hdacc_attach(device_t dev) { struct hdacc_softc *codec = device_get_softc(dev); device_t child; int cad = (intptr_t)device_get_ivars(dev); uint32_t subnode; int startnode; int endnode; int i, n; codec->lock = HDAC_GET_MTX(device_get_parent(dev), dev); codec->dev = dev; codec->cad 
= cad; hdacc_lock(codec); subnode = hda_command(dev, HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_SUB_NODE_COUNT)); hdacc_unlock(codec); if (subnode == HDA_INVALID) return (EIO); codec->fgcnt = HDA_PARAM_SUB_NODE_COUNT_TOTAL(subnode); startnode = HDA_PARAM_SUB_NODE_COUNT_START(subnode); endnode = startnode + codec->fgcnt; HDA_BOOTHVERBOSE( device_printf(dev, "Root Node at nid=0: %d subnodes %d-%d\n", HDA_PARAM_SUB_NODE_COUNT_TOTAL(subnode), startnode, endnode - 1); ); codec->fgs = malloc(sizeof(struct hdacc_fg) * codec->fgcnt, M_HDACC, M_ZERO | M_WAITOK); for (i = startnode, n = 0; i < endnode; i++, n++) { codec->fgs[n].nid = i; hdacc_lock(codec); codec->fgs[n].type = HDA_PARAM_FCT_GRP_TYPE_NODE_TYPE(hda_command(dev, HDA_CMD_GET_PARAMETER(0, i, HDA_PARAM_FCT_GRP_TYPE))); codec->fgs[n].subsystem_id = hda_command(dev, HDA_CMD_GET_SUBSYSTEM_ID(0, i)); hdacc_unlock(codec); codec->fgs[n].dev = child = device_add_child(dev, NULL, -1); if (child == NULL) { device_printf(dev, "Failed to add function device\n"); continue; } device_set_ivars(child, &codec->fgs[n]); } bus_generic_attach(dev); return (0); } static int hdacc_detach(device_t dev) { struct hdacc_softc *codec = device_get_softc(dev); int error; error = device_delete_children(dev); free(codec->fgs, M_HDACC); return (error); } static int -hdacc_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) +hdacc_child_location(device_t dev, device_t child, struct sbuf *sb) { struct hdacc_fg *fg = device_get_ivars(child); - snprintf(buf, buflen, "nid=%d", fg->nid); + sbuf_printf(sb, "nid=%d", fg->nid); return (0); } static int -hdacc_child_pnpinfo_str_method(device_t dev, device_t child, char *buf, - size_t buflen) +hdacc_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb) { struct hdacc_fg *fg = device_get_ivars(child); - snprintf(buf, buflen, "type=0x%02x subsystem=0x%08x", + sbuf_printf(sb, "type=0x%02x subsystem=0x%08x", fg->type, fg->subsystem_id); return (0); } static int hdacc_print_child(device_t dev, device_t child) { struct hdacc_fg *fg = device_get_ivars(child); int retval; retval = bus_print_child_header(dev, child); retval += printf(" at nid %d", fg->nid); retval += bus_print_child_footer(dev, child); return (retval); } static void hdacc_probe_nomatch(device_t dev, device_t child) { struct hdacc_softc *codec = device_get_softc(dev); struct hdacc_fg *fg = device_get_ivars(child); device_printf(child, "<%s %s Function Group> at nid %d on %s " "(no driver attached)\n", device_get_desc(dev), fg->type == HDA_PARAM_FCT_GRP_TYPE_NODE_TYPE_AUDIO ? "Audio" : (fg->type == HDA_PARAM_FCT_GRP_TYPE_NODE_TYPE_MODEM ? 
"Modem" : "Unknown"), fg->nid, device_get_nameunit(dev)); HDA_BOOTVERBOSE( device_printf(dev, "Subsystem ID: 0x%08x\n", hda_get_subsystem_id(dev)); ); HDA_BOOTHVERBOSE( device_printf(dev, "Power down FG nid=%d to the D3 state...\n", fg->nid); ); hdacc_lock(codec); hda_command(dev, HDA_CMD_SET_POWER_STATE(0, fg->nid, HDA_CMD_POWER_STATE_D3)); hdacc_unlock(codec); } static int hdacc_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct hdacc_fg *fg = device_get_ivars(child); switch (which) { case HDA_IVAR_NODE_ID: *result = fg->nid; break; case HDA_IVAR_NODE_TYPE: *result = fg->type; break; case HDA_IVAR_SUBSYSTEM_ID: *result = fg->subsystem_id; break; default: return(BUS_READ_IVAR(device_get_parent(dev), dev, which, result)); } return (0); } static struct mtx * hdacc_get_mtx(device_t dev, device_t child) { struct hdacc_softc *codec = device_get_softc(dev); return (codec->lock); } static uint32_t hdacc_codec_command(device_t dev, device_t child, uint32_t verb) { return (HDAC_CODEC_COMMAND(device_get_parent(dev), dev, verb)); } static int hdacc_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe, uint32_t **dmapos) { struct hdacc_softc *codec = device_get_softc(dev); int stream; stream = HDAC_STREAM_ALLOC(device_get_parent(dev), dev, dir, format, stripe, dmapos); if (stream > 0) codec->streams[dir][stream] = child; return (stream); } static void hdacc_stream_free(device_t dev, device_t child, int dir, int stream) { struct hdacc_softc *codec = device_get_softc(dev); codec->streams[dir][stream] = NULL; HDAC_STREAM_FREE(device_get_parent(dev), dev, dir, stream); } static int hdacc_stream_start(device_t dev, device_t child, int dir, int stream, bus_addr_t buf, int blksz, int blkcnt) { return (HDAC_STREAM_START(device_get_parent(dev), dev, dir, stream, buf, blksz, blkcnt)); } static void hdacc_stream_stop(device_t dev, device_t child, int dir, int stream) { HDAC_STREAM_STOP(device_get_parent(dev), dev, dir, stream); } static void hdacc_stream_reset(device_t dev, device_t child, int dir, int stream) { HDAC_STREAM_RESET(device_get_parent(dev), dev, dir, stream); } static uint32_t hdacc_stream_getptr(device_t dev, device_t child, int dir, int stream) { return (HDAC_STREAM_GETPTR(device_get_parent(dev), dev, dir, stream)); } static void hdacc_stream_intr(device_t dev, int dir, int stream) { struct hdacc_softc *codec = device_get_softc(dev); device_t child; if ((child = codec->streams[dir][stream]) != NULL) HDAC_STREAM_INTR(child, dir, stream); } static int hdacc_unsol_alloc(device_t dev, device_t child, int wanted) { struct hdacc_softc *codec = device_get_softc(dev); int tag; wanted &= 0x3f; tag = wanted; do { if (codec->tags[tag] == NULL) { codec->tags[tag] = child; HDAC_UNSOL_ALLOC(device_get_parent(dev), dev, tag); return (tag); } tag++; tag &= 0x3f; } while (tag != wanted); return (-1); } static void hdacc_unsol_free(device_t dev, device_t child, int tag) { struct hdacc_softc *codec = device_get_softc(dev); KASSERT(tag >= 0 && tag <= 0x3f, ("Wrong tag value %d\n", tag)); codec->tags[tag] = NULL; HDAC_UNSOL_FREE(device_get_parent(dev), dev, tag); } static void hdacc_unsol_intr(device_t dev, uint32_t resp) { struct hdacc_softc *codec = device_get_softc(dev); device_t child; int tag; tag = resp >> 26; if ((child = codec->tags[tag]) != NULL) HDAC_UNSOL_INTR(child, resp); else device_printf(codec->dev, "Unexpected unsolicited " "response with tag %d: %08x\n", tag, resp); } static void hdacc_pindump(device_t dev) { device_t *devlist; int devcount, i; if 
(device_get_children(dev, &devlist, &devcount) != 0) return; for (i = 0; i < devcount; i++) HDAC_PINDUMP(devlist[i]); free(devlist, M_TEMP); } static device_method_t hdacc_methods[] = { /* device interface */ DEVMETHOD(device_probe, hdacc_probe), DEVMETHOD(device_attach, hdacc_attach), DEVMETHOD(device_detach, hdacc_detach), DEVMETHOD(device_suspend, hdacc_suspend), DEVMETHOD(device_resume, hdacc_resume), /* Bus interface */ - DEVMETHOD(bus_child_location_str, hdacc_child_location_str), - DEVMETHOD(bus_child_pnpinfo_str, hdacc_child_pnpinfo_str_method), + DEVMETHOD(bus_child_location, hdacc_child_location), + DEVMETHOD(bus_child_pnpinfo, hdacc_child_pnpinfo_method), DEVMETHOD(bus_print_child, hdacc_print_child), DEVMETHOD(bus_probe_nomatch, hdacc_probe_nomatch), DEVMETHOD(bus_read_ivar, hdacc_read_ivar), DEVMETHOD(hdac_get_mtx, hdacc_get_mtx), DEVMETHOD(hdac_codec_command, hdacc_codec_command), DEVMETHOD(hdac_stream_alloc, hdacc_stream_alloc), DEVMETHOD(hdac_stream_free, hdacc_stream_free), DEVMETHOD(hdac_stream_start, hdacc_stream_start), DEVMETHOD(hdac_stream_stop, hdacc_stream_stop), DEVMETHOD(hdac_stream_reset, hdacc_stream_reset), DEVMETHOD(hdac_stream_getptr, hdacc_stream_getptr), DEVMETHOD(hdac_stream_intr, hdacc_stream_intr), DEVMETHOD(hdac_unsol_alloc, hdacc_unsol_alloc), DEVMETHOD(hdac_unsol_free, hdacc_unsol_free), DEVMETHOD(hdac_unsol_intr, hdacc_unsol_intr), DEVMETHOD(hdac_pindump, hdacc_pindump), DEVMETHOD_END }; static driver_t hdacc_driver = { "hdacc", hdacc_methods, sizeof(struct hdacc_softc), }; static devclass_t hdacc_devclass; DRIVER_MODULE(snd_hda, hdac, hdacc_driver, hdacc_devclass, NULL, NULL); diff --git a/sys/dev/spibus/ofw_spibus.c b/sys/dev/spibus/ofw_spibus.c index f24b12dccb90..7aae7f44b034 100644 --- a/sys/dev/spibus/ofw_spibus.c +++ b/sys/dev/spibus/ofw_spibus.c @@ -1,241 +1,241 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009, Nathan Whitehorn * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Oleksandr Rybalko * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
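Returning to the unsolicited-response plumbing in hdacc above: tags live in the top six bits of the response word, and allocation scans circularly from the wanted tag. A userland toy of the same scheme (unsol_alloc/unsol_owner are illustrative names, not driver functions):

#include <stdint.h>
#include <stdio.h>

#define NTAGS	64

static void *tags[NTAGS];

/* First free tag at or after the wanted one, wrapping modulo 64. */
static int
unsol_alloc(void *owner, int wanted)
{
	int tag = wanted & 0x3f;

	do {
		if (tags[tag] == NULL) {
			tags[tag] = owner;
			return (tag);
		}
		tag = (tag + 1) & 0x3f;
	} while (tag != (wanted & 0x3f));
	return (-1);		/* all 64 tags busy */
}

/* Dispatch a response by the tag carried in bits 31:26. */
static void *
unsol_owner(uint32_t resp)
{
	return (tags[resp >> 26]);
}

int
main(void)
{
	int a = 1, tag = unsol_alloc(&a, 5);

	printf("tag=%d owner ok=%d\n", tag,
	    unsol_owner((uint32_t)tag << 26) == &a);
	return (0);
}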
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include "spibus_if.h" struct ofw_spibus_devinfo { struct spibus_ivar opd_dinfo; struct ofw_bus_devinfo opd_obdinfo; }; /* Methods */ static device_probe_t ofw_spibus_probe; static device_attach_t ofw_spibus_attach; static device_t ofw_spibus_add_child(device_t dev, u_int order, const char *name, int unit); static const struct ofw_bus_devinfo *ofw_spibus_get_devinfo(device_t bus, device_t dev); static int ofw_spibus_probe(device_t dev) { if (ofw_bus_get_node(dev) == -1) return (ENXIO); device_set_desc(dev, "OFW SPI bus"); return (BUS_PROBE_DEFAULT); } static int ofw_spibus_attach(device_t dev) { struct spibus_softc *sc = device_get_softc(dev); struct ofw_spibus_devinfo *dinfo; phandle_t child; pcell_t clock, paddr; device_t childdev; uint32_t mode = SPIBUS_MODE_NONE; sc->dev = dev; bus_generic_probe(dev); bus_enumerate_hinted_children(dev); /* * Attach those children represented in the device tree. */ for (child = OF_child(ofw_bus_get_node(dev)); child != 0; child = OF_peer(child)) { /* * Try to get the CS number first from the spi-chipselect * property, then try the reg property. */ if (OF_getencprop(child, "spi-chipselect", &paddr, sizeof(paddr)) == -1) { if (OF_getencprop(child, "reg", &paddr, sizeof(paddr)) == -1) continue; } /* * Try to get the cpol/cpha mode */ if (OF_hasprop(child, "spi-cpol")) mode = SPIBUS_MODE_CPOL; if (OF_hasprop(child, "spi-cpha")) { if (mode == SPIBUS_MODE_CPOL) mode = SPIBUS_MODE_CPOL_CPHA; else mode = SPIBUS_MODE_CPHA; } /* * Try to get the CS polarity */ if (OF_hasprop(child, "spi-cs-high")) paddr |= SPIBUS_CS_HIGH; /* * Get the maximum clock frequency for device, zero means * use the default bus speed. * * XXX Note that the current (2018-04-07) dts bindings say that * spi-max-frequency is a required property (but says nothing of * how to interpret a value of zero). */ if (OF_getencprop(child, "spi-max-frequency", &clock, sizeof(clock)) == -1) clock = 0; /* * Now set up the SPI and OFW bus layer devinfo and add it * to the bus. */ dinfo = malloc(sizeof(struct ofw_spibus_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (dinfo == NULL) continue; dinfo->opd_dinfo.cs = paddr; dinfo->opd_dinfo.clock = clock; dinfo->opd_dinfo.mode = mode; if (ofw_bus_gen_setup_devinfo(&dinfo->opd_obdinfo, child) != 0) { free(dinfo, M_DEVBUF); continue; } childdev = device_add_child(dev, NULL, -1); resource_list_init(&dinfo->opd_dinfo.rl); ofw_bus_intr_to_rl(childdev, child, &dinfo->opd_dinfo.rl, NULL); device_set_ivars(childdev, dinfo); } return (bus_generic_attach(dev)); } static device_t ofw_spibus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct ofw_spibus_devinfo *devi; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); devi = malloc(sizeof(struct ofw_spibus_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (0); } /* * NULL all the OFW-related parts of the ivars for non-OFW * children. 
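* The node handle is set to -1, the usual sentinel for "no OFW node",
* so the ofw_bus accessors report no properties for such children.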
*/ devi->opd_obdinfo.obd_node = -1; devi->opd_obdinfo.obd_name = NULL; devi->opd_obdinfo.obd_compat = NULL; devi->opd_obdinfo.obd_type = NULL; devi->opd_obdinfo.obd_model = NULL; device_set_ivars(child, devi); return (child); } static const struct ofw_bus_devinfo * ofw_spibus_get_devinfo(device_t bus, device_t dev) { struct ofw_spibus_devinfo *dinfo; dinfo = device_get_ivars(dev); return (&dinfo->opd_obdinfo); } static struct resource_list * ofw_spibus_get_resource_list(device_t bus __unused, device_t child) { struct spibus_ivar *devi; devi = SPIBUS_IVAR(child); return (&devi->rl); } static device_method_t ofw_spibus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofw_spibus_probe), DEVMETHOD(device_attach, ofw_spibus_attach), /* Bus interface */ - DEVMETHOD(bus_child_pnpinfo_str, ofw_bus_gen_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_add_child, ofw_spibus_add_child), DEVMETHOD(bus_get_resource_list, ofw_spibus_get_resource_list), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, ofw_spibus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; devclass_t ofw_spibus_devclass; DEFINE_CLASS_1(spibus, ofw_spibus_driver, ofw_spibus_methods, sizeof(struct spibus_softc), spibus_driver); DRIVER_MODULE(ofw_spibus, spi, ofw_spibus_driver, ofw_spibus_devclass, 0, 0); MODULE_VERSION(ofw_spibus, 1); MODULE_DEPEND(ofw_spibus, spibus, 1, 1, 1); diff --git a/sys/dev/spibus/spibus.c b/sys/dev/spibus/spibus.c index 77b35b657381..f9392131e7b6 100644 --- a/sys/dev/spibus/spibus.c +++ b/sys/dev/spibus/spibus.c @@ -1,292 +1,282 @@ /*- * Copyright (c) 2006 M. Warner Losh * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include +#include #include -#include #include #include #include #include #include #include #include "spibus_if.h" static int spibus_probe(device_t dev) { device_set_desc(dev, "SPI bus"); return (BUS_PROBE_DEFAULT); } static int spibus_attach(device_t dev) { struct spibus_softc *sc = SPIBUS_SOFTC(dev); sc->dev = dev; bus_enumerate_hinted_children(dev); return (bus_generic_attach(dev)); } /* * Since this is not a self-enumerating bus, and since we always add * children in attach, we have to always delete children here. */ static int spibus_detach(device_t dev) { int err; if ((err = bus_generic_detach(dev)) != 0) return (err); device_delete_children(dev); return (0); } static int spibus_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int spibus_resume(device_t dev) { return (bus_generic_resume(dev)); } static int spibus_print_child(device_t dev, device_t child) { struct spibus_ivar *devi = SPIBUS_IVAR(child); int retval = 0; retval += bus_print_child_header(dev, child); retval += printf(" at cs %d", devi->cs); retval += printf(" mode %d", devi->mode); retval += resource_list_print_type(&devi->rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void spibus_probe_nomatch(device_t bus, device_t child) { struct spibus_ivar *devi = SPIBUS_IVAR(child); device_printf(bus, " at cs %d mode %d\n", devi->cs, devi->mode); return; } static int -spibus_child_location_str(device_t bus, device_t child, char *buf, - size_t buflen) +spibus_child_location(device_t bus, device_t child, struct sbuf *sb) { struct spibus_ivar *devi = SPIBUS_IVAR(child); int cs; cs = devi->cs & ~SPIBUS_CS_HIGH; /* trim 'cs high' bit */ - snprintf(buf, buflen, "bus=%d cs=%d", device_get_unit(bus), cs); - return (0); -} - -static int -spibus_child_pnpinfo_str(device_t bus, device_t child, char *buf, - size_t buflen) -{ - *buf = '\0'; + sbuf_printf(sb, "bus=%d cs=%d", device_get_unit(bus), cs); return (0); } static int spibus_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct spibus_ivar *devi = SPIBUS_IVAR(child); switch (which) { default: return (EINVAL); case SPIBUS_IVAR_CS: *(uint32_t *)result = devi->cs; break; case SPIBUS_IVAR_MODE: *(uint32_t *)result = devi->mode; break; case SPIBUS_IVAR_CLOCK: *(uint32_t *)result = devi->clock; break; } return (0); } static int spibus_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct spibus_ivar *devi = SPIBUS_IVAR(child); if (devi == NULL || device_get_parent(child) != bus) return (EDOOFUS); switch (which) { case SPIBUS_IVAR_CLOCK: /* Any non-zero value is allowed for max clock frequency. */ if (value == 0) return (EINVAL); devi->clock = (uint32_t)value; break; case SPIBUS_IVAR_CS: /* Chip select cannot be changed. */ return (EINVAL); case SPIBUS_IVAR_MODE: /* Valid SPI modes are 0-3. 
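* They encode the usual (CPOL, CPHA) pairs:
* mode 0 = (0,0), 1 = (0,1), 2 = (1,0), 3 = (1,1).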
*/ if (value > 3) return (EINVAL); devi->mode = (uint32_t)value; break; default: return (EINVAL); } return (0); } static device_t spibus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct spibus_ivar *devi; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); devi = malloc(sizeof(struct spibus_ivar), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (0); } resource_list_init(&devi->rl); device_set_ivars(child, devi); return (child); } static void spibus_hinted_child(device_t bus, const char *dname, int dunit) { device_t child; int irq; struct spibus_ivar *devi; child = BUS_ADD_CHILD(bus, 0, dname, dunit); devi = SPIBUS_IVAR(child); devi->mode = SPIBUS_MODE_NONE; resource_int_value(dname, dunit, "clock", &devi->clock); resource_int_value(dname, dunit, "cs", &devi->cs); resource_int_value(dname, dunit, "mode", &devi->mode); if (resource_int_value(dname, dunit, "irq", &irq) == 0) { if (bus_set_resource(child, SYS_RES_IRQ, 0, irq, 1) != 0) device_printf(bus, "Warning: bus_set_resource() failed\n"); } } static struct resource_list * spibus_get_resource_list(device_t bus __unused, device_t child) { struct spibus_ivar *devi; devi = SPIBUS_IVAR(child); return (&devi->rl); } static int spibus_transfer_impl(device_t dev, device_t child, struct spi_command *cmd) { return (SPIBUS_TRANSFER(device_get_parent(dev), child, cmd)); } static device_method_t spibus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, spibus_probe), DEVMETHOD(device_attach, spibus_attach), DEVMETHOD(device_detach, spibus_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, spibus_suspend), DEVMETHOD(device_resume, spibus_resume), /* Bus interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource_list, spibus_get_resource_list), DEVMETHOD(bus_add_child, spibus_add_child), DEVMETHOD(bus_print_child, spibus_print_child), DEVMETHOD(bus_probe_nomatch, spibus_probe_nomatch), DEVMETHOD(bus_read_ivar, spibus_read_ivar), DEVMETHOD(bus_write_ivar, spibus_write_ivar), - DEVMETHOD(bus_child_pnpinfo_str, spibus_child_pnpinfo_str), - DEVMETHOD(bus_child_location_str, spibus_child_location_str), + DEVMETHOD(bus_child_location, spibus_child_location), DEVMETHOD(bus_hinted_child, spibus_hinted_child), /* spibus interface */ DEVMETHOD(spibus_transfer, spibus_transfer_impl), DEVMETHOD_END }; driver_t spibus_driver = { "spibus", spibus_methods, sizeof(struct spibus_softc) }; devclass_t spibus_devclass; DRIVER_MODULE(spibus, spi, spibus_driver, spibus_devclass, 0, 0); MODULE_VERSION(spibus, 1); diff --git a/sys/dev/superio/superio.c b/sys/dev/superio/superio.c index b102159c2810..c0624dbc0412 100644 --- a/sys/dev/superio/superio.c +++ b/sys/dev/superio/superio.c @@ -1,1035 +1,1034 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Andriy Gapon * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that 
the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include "isa_if.h" typedef void (*sio_conf_enter_f)(struct resource*, uint16_t); typedef void (*sio_conf_exit_f)(struct resource*, uint16_t); struct sio_conf_methods { sio_conf_enter_f enter; sio_conf_exit_f exit; superio_vendor_t vendor; }; struct sio_device { uint8_t ldn; superio_dev_type_t type; }; struct superio_devinfo { STAILQ_ENTRY(superio_devinfo) link; struct resource_list resources; device_t dev; uint8_t ldn; superio_dev_type_t type; uint16_t iobase; uint16_t iobase2; uint8_t irq; uint8_t dma; }; struct siosc { struct mtx conf_lock; STAILQ_HEAD(, superio_devinfo) devlist; struct resource* io_res; struct cdev *chardev; int io_rid; uint16_t io_port; const struct sio_conf_methods *methods; const struct sio_device *known_devices; superio_vendor_t vendor; uint16_t devid; uint8_t revid; uint8_t current_ldn; uint8_t ldn_reg; uint8_t enable_reg; }; static d_ioctl_t superio_ioctl; static struct cdevsw superio_cdevsw = { .d_version = D_VERSION, .d_ioctl = superio_ioctl, .d_name = "superio", }; #define NUMPORTS 2 static uint8_t sio_read(struct resource* res, uint8_t reg) { bus_write_1(res, 0, reg); return (bus_read_1(res, 1)); } /* Read a word from two one-byte registers, big endian. 
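* For example, sio_readw(res, 0x20) assembles a chip ID with
* register 0x20 as the high byte and 0x21 as the low byte.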
*/ static uint16_t sio_readw(struct resource* res, uint8_t reg) { uint16_t v; v = sio_read(res, reg); v <<= 8; v |= sio_read(res, reg + 1); return (v); } static void sio_write(struct resource* res, uint8_t reg, uint8_t val) { bus_write_1(res, 0, reg); bus_write_1(res, 1, val); } static void sio_ldn_select(struct siosc *sc, uint8_t ldn) { mtx_assert(&sc->conf_lock, MA_OWNED); if (ldn == sc->current_ldn) return; sio_write(sc->io_res, sc->ldn_reg, ldn); sc->current_ldn = ldn; } static uint8_t sio_ldn_read(struct siosc *sc, uint8_t ldn, uint8_t reg) { mtx_assert(&sc->conf_lock, MA_OWNED); if (reg >= sc->enable_reg) { sio_ldn_select(sc, ldn); KASSERT(sc->current_ldn == ldn, ("sio_ldn_select failed")); } return (sio_read(sc->io_res, reg)); } static uint16_t sio_ldn_readw(struct siosc *sc, uint8_t ldn, uint8_t reg) { mtx_assert(&sc->conf_lock, MA_OWNED); if (reg >= sc->enable_reg) { sio_ldn_select(sc, ldn); KASSERT(sc->current_ldn == ldn, ("sio_ldn_select failed")); } return (sio_readw(sc->io_res, reg)); } static void sio_ldn_write(struct siosc *sc, uint8_t ldn, uint8_t reg, uint8_t val) { mtx_assert(&sc->conf_lock, MA_OWNED); if (reg <= sc->ldn_reg) { printf("ignored attempt to write special register 0x%x\n", reg); return; } sio_ldn_select(sc, ldn); KASSERT(sc->current_ldn == ldn, ("sio_ldn_select failed")); sio_write(sc->io_res, reg, val); } static void sio_conf_enter(struct siosc *sc) { mtx_lock(&sc->conf_lock); sc->methods->enter(sc->io_res, sc->io_port); } static void sio_conf_exit(struct siosc *sc) { sc->methods->exit(sc->io_res, sc->io_port); sc->current_ldn = 0xff; mtx_unlock(&sc->conf_lock); } static void ite_conf_enter(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0x87); bus_write_1(res, 0, 0x01); bus_write_1(res, 0, 0x55); bus_write_1(res, 0, port == 0x2e ? 
0x55 : 0xaa); } static void ite_conf_exit(struct resource* res, uint16_t port) { sio_write(res, 0x02, 0x02); } static const struct sio_conf_methods ite_conf_methods = { .enter = ite_conf_enter, .exit = ite_conf_exit, .vendor = SUPERIO_VENDOR_ITE }; static void nvt_conf_enter(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0x87); bus_write_1(res, 0, 0x87); } static void nvt_conf_exit(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0xaa); } static const struct sio_conf_methods nvt_conf_methods = { .enter = nvt_conf_enter, .exit = nvt_conf_exit, .vendor = SUPERIO_VENDOR_NUVOTON }; static void fintek_conf_enter(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0x87); bus_write_1(res, 0, 0x87); } static void fintek_conf_exit(struct resource* res, uint16_t port) { bus_write_1(res, 0, 0xaa); } static const struct sio_conf_methods fintek_conf_methods = { .enter = fintek_conf_enter, .exit = fintek_conf_exit, .vendor = SUPERIO_VENDOR_FINTEK }; static const struct sio_conf_methods * const methods_table[] = { &ite_conf_methods, &nvt_conf_methods, &fintek_conf_methods, NULL }; static const uint16_t ports_table[] = { 0x2e, 0x4e, 0 }; const struct sio_device ite_devices[] = { { .ldn = 4, .type = SUPERIO_DEV_HWM }, { .ldn = 7, .type = SUPERIO_DEV_WDT }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device nvt_devices[] = { { .ldn = 8, .type = SUPERIO_DEV_WDT }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device nct5104_devices[] = { { .ldn = 7, .type = SUPERIO_DEV_GPIO }, { .ldn = 8, .type = SUPERIO_DEV_WDT }, { .ldn = 15, .type = SUPERIO_DEV_GPIO }, { .type = SUPERIO_DEV_NONE }, }; const struct sio_device fintek_devices[] = { { .ldn = 7, .type = SUPERIO_DEV_WDT }, { .type = SUPERIO_DEV_NONE }, }; static const struct { superio_vendor_t vendor; uint16_t devid; uint16_t mask; const char *descr; const struct sio_device *devices; } superio_table[] = { { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8712, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8716, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8718, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8720, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8721, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8726, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8728, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_ITE, .devid = 0x8771, .devices = ite_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x1061, .mask = 0x00, .descr = "Nuvoton NCT5104D/NCT6102D/NCT6106D (rev. 
A)", .devices = nct5104_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x5200, .mask = 0xff, .descr = "Winbond 83627HF/F/HG/G", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x5900, .mask = 0xff, .descr = "Winbond 83627S", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x6000, .mask = 0xff, .descr = "Winbond 83697HF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x6800, .mask = 0xff, .descr = "Winbond 83697UG", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x7000, .mask = 0xff, .descr = "Winbond 83637HF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x8200, .mask = 0xff, .descr = "Winbond 83627THF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x8500, .mask = 0xff, .descr = "Winbond 83687THF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0x8800, .mask = 0xff, .descr = "Winbond 83627EHF", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xa000, .mask = 0xff, .descr = "Winbond 83627DHG", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xa200, .mask = 0xff, .descr = "Winbond 83627UHG", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xa500, .mask = 0xff, .descr = "Winbond 83667HG", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xb000, .mask = 0xff, .descr = "Winbond 83627DHG-P", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xb300, .mask = 0xff, .descr = "Winbond 83667HG-B", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xb400, .mask = 0xff, .descr = "Nuvoton NCT6775", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc300, .mask = 0xff, .descr = "Nuvoton NCT6776", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc400, .mask = 0xff, .descr = "Nuvoton NCT5104D/NCT6102D/NCT6106D (rev. B+)", .devices = nct5104_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc500, .mask = 0xff, .descr = "Nuvoton NCT6779", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc800, .mask = 0xff, .descr = "Nuvoton NCT6791", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xc900, .mask = 0xff, .descr = "Nuvoton NCT6792", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xd100, .mask = 0xff, .descr = "Nuvoton NCT6793", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_NUVOTON, .devid = 0xd300, .mask = 0xff, .descr = "Nuvoton NCT6795", .devices = nvt_devices, }, { .vendor = SUPERIO_VENDOR_FINTEK, .devid = 0x1210, .mask = 0xff, .descr = "Fintek F81803", .devices = fintek_devices, }, { 0, 0 } }; static const char * devtype_to_str(superio_dev_type_t type) { switch (type) { case SUPERIO_DEV_NONE: return ("none"); case SUPERIO_DEV_HWM: return ("HWM"); case SUPERIO_DEV_WDT: return ("WDT"); case SUPERIO_DEV_GPIO: return ("GPIO"); case SUPERIO_DEV_MAX: return ("invalid"); } return ("invalid"); } static int superio_detect(device_t dev, bool claim, struct siosc *sc) { struct resource *res; rman_res_t port; rman_res_t count; uint16_t devid; uint8_t revid; int error; int rid; int i, m; error = bus_get_resource(dev, SYS_RES_IOPORT, 0, &port, &count); if (error != 0) return (error); if (port > UINT16_MAX || count < NUMPORTS) { device_printf(dev, "unexpected I/O range size\n"); return (ENXIO); } /* * Make a temporary resource reservation for hardware probing. 
* If we can't get the resources we need then * we need to abort. Possibly this indicates * the resources were used by another device * in which case the probe would have failed anyhow. */ rid = 0; res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (res == NULL) { if (claim) device_printf(dev, "failed to allocate I/O resource\n"); return (ENXIO); } for (m = 0; methods_table[m] != NULL; m++) { methods_table[m]->enter(res, port); if (methods_table[m]->vendor == SUPERIO_VENDOR_ITE) { devid = sio_readw(res, 0x20); revid = sio_read(res, 0x22); } else if (methods_table[m]->vendor == SUPERIO_VENDOR_NUVOTON) { devid = sio_read(res, 0x20); revid = sio_read(res, 0x21); devid = (devid << 8) | revid; } else if (methods_table[m]->vendor == SUPERIO_VENDOR_FINTEK) { devid = sio_read(res, 0x20); revid = sio_read(res, 0x21); devid = (devid << 8) | revid; } else { continue; } methods_table[m]->exit(res, port); for (i = 0; superio_table[i].vendor != 0; i++) { uint16_t mask; mask = superio_table[i].mask; if (superio_table[i].vendor != methods_table[m]->vendor) continue; if ((superio_table[i].devid & ~mask) != (devid & ~mask)) continue; break; } /* Found a matching SuperIO entry. */ if (superio_table[i].vendor != 0) break; } if (methods_table[m] == NULL) error = ENXIO; else error = 0; if (!claim || error != 0) { bus_release_resource(dev, SYS_RES_IOPORT, rid, res); return (error); } sc->methods = methods_table[m]; sc->vendor = sc->methods->vendor; sc->known_devices = superio_table[i].devices; sc->io_res = res; sc->io_rid = rid; sc->io_port = port; sc->devid = devid; sc->revid = revid; KASSERT(sc->vendor == SUPERIO_VENDOR_ITE || sc->vendor == SUPERIO_VENDOR_NUVOTON || sc->vendor == SUPERIO_VENDOR_FINTEK, ("Only ITE, Nuvoton and Fintek SuperIOs are supported")); sc->ldn_reg = 0x07; sc->enable_reg = 0x30; sc->current_ldn = 0xff; /* no device should have this */ if (superio_table[i].descr != NULL) { device_set_desc(dev, superio_table[i].descr); } else if (sc->vendor == SUPERIO_VENDOR_ITE) { char descr[64]; snprintf(descr, sizeof(descr), "ITE IT%4x SuperIO (revision 0x%02x)", sc->devid, sc->revid); device_set_desc_copy(dev, descr); } return (0); } static void superio_identify(driver_t *driver, device_t parent) { device_t child; int i; /* * Don't create child devices if any already exist. * Those could be created via isa hints or if this * driver is loaded, unloaded and then loaded again. */ if (device_find_child(parent, "superio", -1)) { if (bootverbose) printf("superio: device(s) already created\n"); return; } /* * Create a child for each candidate port. * It would be nice if we could somehow clean up those * that this driver fails to probe. */ for (i = 0; ports_table[i] != 0; i++) { child = BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "superio", -1); if (child == NULL) { device_printf(parent, "failed to add superio child\n"); continue; } bus_set_resource(child, SYS_RES_IOPORT, 0, ports_table[i], 2); if (superio_detect(child, false, NULL) != 0) device_delete_child(parent, child); } } static int superio_probe(device_t dev) { struct siosc *sc; int error; /* Make sure we do not claim some ISA PNP device.
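* Devices enumerated via ISA PNP report a non-zero logical ID,
* while the speculative children added by superio_identify() do not.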
*/ if (isa_get_logicalid(dev) != 0) return (ENXIO); /* * XXX We can populate the softc now only because we return * BUS_PROBE_SPECIFIC */ sc = device_get_softc(dev); error = superio_detect(dev, true, sc); if (error != 0) return (error); return (BUS_PROBE_SPECIFIC); } static void superio_add_known_child(device_t dev, superio_dev_type_t type, uint8_t ldn) { struct siosc *sc = device_get_softc(dev); struct superio_devinfo *dinfo; device_t child; child = BUS_ADD_CHILD(dev, 0, NULL, -1); if (child == NULL) { device_printf(dev, "failed to add child for ldn %d, type %s\n", ldn, devtype_to_str(type)); return; } dinfo = device_get_ivars(child); dinfo->ldn = ldn; dinfo->type = type; sio_conf_enter(sc); dinfo->iobase = sio_ldn_readw(sc, ldn, 0x60); dinfo->iobase2 = sio_ldn_readw(sc, ldn, 0x62); dinfo->irq = sio_ldn_readw(sc, ldn, 0x70); dinfo->dma = sio_ldn_readw(sc, ldn, 0x74); sio_conf_exit(sc); STAILQ_INSERT_TAIL(&sc->devlist, dinfo, link); } static int superio_attach(device_t dev) { struct siosc *sc = device_get_softc(dev); int i; mtx_init(&sc->conf_lock, device_get_nameunit(dev), "superio", MTX_DEF); STAILQ_INIT(&sc->devlist); for (i = 0; sc->known_devices[i].type != SUPERIO_DEV_NONE; i++) { superio_add_known_child(dev, sc->known_devices[i].type, sc->known_devices[i].ldn); } bus_generic_probe(dev); bus_generic_attach(dev); sc->chardev = make_dev(&superio_cdevsw, device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "superio%d", device_get_unit(dev)); if (sc->chardev == NULL) device_printf(dev, "failed to create character device\n"); else sc->chardev->si_drv1 = sc; return (0); } static int superio_detach(device_t dev) { struct siosc *sc = device_get_softc(dev); int error; error = bus_generic_detach(dev); if (error != 0) return (error); if (sc->chardev != NULL) destroy_dev(sc->chardev); device_delete_children(dev); bus_release_resource(dev, SYS_RES_IOPORT, sc->io_rid, sc->io_res); mtx_destroy(&sc->conf_lock); return (0); } static device_t superio_add_child(device_t dev, u_int order, const char *name, int unit) { struct superio_devinfo *dinfo; device_t child; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (NULL); dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (dinfo == NULL) { device_delete_child(dev, child); return (NULL); } dinfo->ldn = 0xff; dinfo->type = SUPERIO_DEV_NONE; dinfo->dev = child; resource_list_init(&dinfo->resources); device_set_ivars(child, dinfo); return (child); } static int superio_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct superio_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case SUPERIO_IVAR_LDN: *result = dinfo->ldn; break; case SUPERIO_IVAR_TYPE: *result = dinfo->type; break; case SUPERIO_IVAR_IOBASE: *result = dinfo->iobase; break; case SUPERIO_IVAR_IOBASE2: *result = dinfo->iobase2; break; case SUPERIO_IVAR_IRQ: *result = dinfo->irq; break; case SUPERIO_IVAR_DMA: *result = dinfo->dma; break; default: return (ENOENT); } return (0); } static int superio_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { switch (which) { case SUPERIO_IVAR_LDN: case SUPERIO_IVAR_TYPE: case SUPERIO_IVAR_IOBASE: case SUPERIO_IVAR_IOBASE2: case SUPERIO_IVAR_IRQ: case SUPERIO_IVAR_DMA: return (EINVAL); default: return (ENOENT); } } static struct resource_list * superio_get_resource_list(device_t dev, device_t child) { struct superio_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } static int superio_printf(struct superio_devinfo *dinfo, const char 
*fmt, ...) { va_list ap; int retval; retval = printf("superio:%s@ldn%02x: ", devtype_to_str(dinfo->type), dinfo->ldn); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } static void superio_child_detached(device_t dev, device_t child) { struct superio_devinfo *dinfo; struct resource_list *rl; dinfo = device_get_ivars(child); rl = &dinfo->resources; if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0) superio_printf(dinfo, "Device leaked IRQ resources\n"); if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0) superio_printf(dinfo, "Device leaked memory resources\n"); if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0) superio_printf(dinfo, "Device leaked I/O resources\n"); } static int -superio_child_location_str(device_t parent, device_t child, char *buf, - size_t buflen) +superio_child_location(device_t parent, device_t child, struct sbuf *sb) { uint8_t ldn; ldn = superio_get_ldn(child); - snprintf(buf, buflen, "ldn=0x%02x", ldn); + sbuf_printf(sb, "ldn=0x%02x", ldn); return (0); } static int -superio_child_pnp_str(device_t parent, device_t child, char *buf, - size_t buflen) +superio_child_pnp(device_t parent, device_t child, struct sbuf *sb) { superio_dev_type_t type; type = superio_get_type(child); - snprintf(buf, buflen, "type=%s", devtype_to_str(type)); + sbuf_printf(sb, "type=%s", devtype_to_str(type)); return (0); } static int superio_print_child(device_t parent, device_t child) { superio_dev_type_t type; uint8_t ldn; int retval; ldn = superio_get_ldn(child); type = superio_get_type(child); retval = bus_print_child_header(parent, child); retval += printf(" at %s ldn 0x%02x", devtype_to_str(type), ldn); retval += bus_print_child_footer(parent, child); return (retval); } superio_vendor_t superio_vendor(device_t dev) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); return (sc->vendor); } uint16_t superio_devid(device_t dev) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); return (sc->devid); } uint8_t superio_revid(device_t dev) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); return (sc->revid); } uint8_t superio_read(device_t dev, uint8_t reg) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); struct superio_devinfo *dinfo = device_get_ivars(dev); uint8_t v; sio_conf_enter(sc); v = sio_ldn_read(sc, dinfo->ldn, reg); sio_conf_exit(sc); return (v); } void superio_write(device_t dev, uint8_t reg, uint8_t val) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); struct superio_devinfo *dinfo = device_get_ivars(dev); sio_conf_enter(sc); sio_ldn_write(sc, dinfo->ldn, reg, val); sio_conf_exit(sc); } bool superio_dev_enabled(device_t dev, uint8_t mask) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); struct superio_devinfo *dinfo = device_get_ivars(dev); uint8_t v; /* GPIO device is always active in ITE chips. */ if (sc->vendor == SUPERIO_VENDOR_ITE && dinfo->ldn == 7) return (true); v = superio_read(dev, sc->enable_reg); return ((v & mask) != 0); } void superio_dev_enable(device_t dev, uint8_t mask) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); struct superio_devinfo *dinfo = device_get_ivars(dev); uint8_t v; /* GPIO device is always active in ITE chips.
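* There is no enable bit to set in that case, so return without
* touching the configuration registers.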
*/ if (sc->vendor == SUPERIO_VENDOR_ITE && dinfo->ldn == 7) return; sio_conf_enter(sc); v = sio_ldn_read(sc, dinfo->ldn, sc->enable_reg); v |= mask; sio_ldn_write(sc, dinfo->ldn, sc->enable_reg, v); sio_conf_exit(sc); } void superio_dev_disable(device_t dev, uint8_t mask) { device_t sio_dev = device_get_parent(dev); struct siosc *sc = device_get_softc(sio_dev); struct superio_devinfo *dinfo = device_get_ivars(dev); uint8_t v; /* GPIO device is always active in ITE chips. */ if (sc->vendor == SUPERIO_VENDOR_ITE && dinfo->ldn == 7) return; sio_conf_enter(sc); v = sio_ldn_read(sc, dinfo->ldn, sc->enable_reg); v &= ~mask; sio_ldn_write(sc, dinfo->ldn, sc->enable_reg, v); sio_conf_exit(sc); } device_t superio_find_dev(device_t superio, superio_dev_type_t type, int ldn) { struct siosc *sc = device_get_softc(superio); struct superio_devinfo *dinfo; if (ldn < -1 || ldn > UINT8_MAX) return (NULL); /* ERANGE */ if (type == SUPERIO_DEV_NONE && ldn == -1) return (NULL); /* EINVAL */ STAILQ_FOREACH(dinfo, &sc->devlist, link) { if (ldn != -1 && dinfo->ldn != ldn) continue; if (type != SUPERIO_DEV_NONE && dinfo->type != type) continue; return (dinfo->dev); } return (NULL); } static int superio_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td) { struct siosc *sc; struct superiocmd *s; sc = dev->si_drv1; s = (struct superiocmd *)data; switch (cmd) { case SUPERIO_CR_READ: sio_conf_enter(sc); s->val = sio_ldn_read(sc, s->ldn, s->cr); sio_conf_exit(sc); return (0); case SUPERIO_CR_WRITE: sio_conf_enter(sc); sio_ldn_write(sc, s->ldn, s->cr, s->val); sio_conf_exit(sc); return (0); default: return (ENOTTY); } } static devclass_t superio_devclass; static device_method_t superio_methods[] = { DEVMETHOD(device_identify, superio_identify), DEVMETHOD(device_probe, superio_probe), DEVMETHOD(device_attach, superio_attach), DEVMETHOD(device_detach, superio_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(bus_add_child, superio_add_child), DEVMETHOD(bus_child_detached, superio_child_detached), - DEVMETHOD(bus_child_location_str, superio_child_location_str), - DEVMETHOD(bus_child_pnpinfo_str, superio_child_pnp_str), + DEVMETHOD(bus_child_location, superio_child_location), + DEVMETHOD(bus_child_pnpinfo, superio_child_pnp), DEVMETHOD(bus_print_child, superio_print_child), DEVMETHOD(bus_read_ivar, superio_read_ivar), DEVMETHOD(bus_write_ivar, superio_write_ivar), DEVMETHOD(bus_get_resource_list, superio_get_resource_list), DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD_END }; static driver_t superio_driver = { "superio", superio_methods, sizeof(struct siosc) }; DRIVER_MODULE(superio, isa, superio_driver, superio_devclass, 0, 0); MODULE_VERSION(superio, 1); diff --git a/sys/dev/usb/usb_hub.c b/sys/dev/usb/usb_hub.c index 7f11eea2500d..1f0c053ad674 100644 --- a/sys/dev/usb/usb_hub.c +++ b/sys/dev/usb/usb_hub.c @@ -1,2976 +1,2958 @@ /* $FreeBSD$ */ /*- * SPDX-License-Identifier: 
BSD-2-Clause-NetBSD * * Copyright (c) 1998 The NetBSD Foundation, Inc. All rights reserved. * Copyright (c) 1998 Lennart Augustsson. All rights reserved. * Copyright (c) 2008-2010 Hans Petter Selasky. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * USB spec: http://www.usb.org/developers/docs/usbspec.zip */ #ifdef USB_GLOBAL_INCLUDE_FILE #include USB_GLOBAL_INCLUDE_FILE #else #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #define USB_DEBUG_VAR uhub_debug #include #include #include #include #include #include #include #include #include #include #include #include #endif /* USB_GLOBAL_INCLUDE_FILE */ #include #ifdef USB_DEBUG static int uhub_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, uhub, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "USB HUB"); SYSCTL_INT(_hw_usb_uhub, OID_AUTO, debug, CTLFLAG_RWTUN, &uhub_debug, 0, "Debug level"); #endif #if USB_HAVE_POWERD static int usb_power_timeout = 30; /* seconds */ SYSCTL_INT(_hw_usb, OID_AUTO, power_timeout, CTLFLAG_RWTUN, &usb_power_timeout, 0, "USB power timeout"); #endif #if USB_HAVE_DISABLE_ENUM static int usb_disable_enumeration = 0; SYSCTL_INT(_hw_usb, OID_AUTO, disable_enumeration, CTLFLAG_RWTUN, &usb_disable_enumeration, 0, "Set to disable all USB device enumeration. 
" "This can secure against USB devices turning evil, " "for example a USB memory stick becoming a USB keyboard."); static int usb_disable_port_power = 0; SYSCTL_INT(_hw_usb, OID_AUTO, disable_port_power, CTLFLAG_RWTUN, &usb_disable_port_power, 0, "Set to disable all USB port power."); #endif #define UHUB_PROTO(sc) ((sc)->sc_udev->ddesc.bDeviceProtocol) #define UHUB_IS_HIGH_SPEED(sc) (UHUB_PROTO(sc) != UDPROTO_FSHUB) #define UHUB_IS_SINGLE_TT(sc) (UHUB_PROTO(sc) == UDPROTO_HSHUBSTT) #define UHUB_IS_MULTI_TT(sc) (UHUB_PROTO(sc) == UDPROTO_HSHUBMTT) #define UHUB_IS_SUPER_SPEED(sc) (UHUB_PROTO(sc) == UDPROTO_SSHUB) /* prototypes for type checking: */ static device_suspend_t uhub_suspend; static device_resume_t uhub_resume; static bus_driver_added_t uhub_driver_added; -static bus_child_pnpinfo_str_t uhub_child_pnpinfo_string; +static bus_child_pnpinfo_t uhub_child_pnpinfo; static usb_callback_t uhub_intr_callback; #if USB_HAVE_TT_SUPPORT static usb_callback_t uhub_reset_tt_callback; #endif static void usb_dev_resume_peer(struct usb_device *udev); static void usb_dev_suspend_peer(struct usb_device *udev); static uint8_t usb_peer_should_wakeup(struct usb_device *udev); static const struct usb_config uhub_config[UHUB_N_TRANSFER] = { [UHUB_INTR_TRANSFER] = { .type = UE_INTERRUPT, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_ANY, .timeout = 0, .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .bufsize = 0, /* use wMaxPacketSize */ .callback = &uhub_intr_callback, .interval = UHUB_INTR_INTERVAL, }, #if USB_HAVE_TT_SUPPORT [UHUB_RESET_TT_TRANSFER] = { .type = UE_CONTROL, .endpoint = 0x00, /* Control pipe */ .direction = UE_DIR_ANY, .bufsize = sizeof(struct usb_device_request), .callback = &uhub_reset_tt_callback, .timeout = 1000, /* 1 second */ .usb_mode = USB_MODE_HOST, }, #endif }; /* * driver instance for "hub" connected to "usb" * and "hub" connected to "hub" */ static devclass_t uhub_devclass; static device_method_t uhub_methods[] = { DEVMETHOD(device_probe, uhub_probe), DEVMETHOD(device_attach, uhub_attach), DEVMETHOD(device_detach, uhub_detach), DEVMETHOD(device_suspend, uhub_suspend), DEVMETHOD(device_resume, uhub_resume), - DEVMETHOD(bus_child_location_str, uhub_child_location_string), - DEVMETHOD(bus_child_pnpinfo_str, uhub_child_pnpinfo_string), + DEVMETHOD(bus_child_location, uhub_child_location), + DEVMETHOD(bus_child_pnpinfo, uhub_child_pnpinfo), DEVMETHOD(bus_driver_added, uhub_driver_added), DEVMETHOD_END }; driver_t uhub_driver = { .name = "uhub", .methods = uhub_methods, .size = sizeof(struct uhub_softc) }; DRIVER_MODULE(uhub, usbus, uhub_driver, uhub_devclass, 0, 0); DRIVER_MODULE(uhub, uhub, uhub_driver, uhub_devclass, NULL, 0); MODULE_VERSION(uhub, 1); static void uhub_intr_callback(struct usb_xfer *xfer, usb_error_t error) { struct uhub_softc *sc = usbd_xfer_softc(xfer); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(2, "\n"); /* * This is an indication that some port * has changed status. Notify the bus * event handler thread that we need * to be explored again: */ usb_needs_explore(sc->sc_udev->bus, 0); case USB_ST_SETUP: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); break; default: /* Error */ if (xfer->error != USB_ERR_CANCELLED) { /* * Do a clear-stall. The "stall_pipe" flag * will get cleared before next callback by * the USB stack. 
*/ usbd_xfer_set_stall(xfer); usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); } break; } } /*------------------------------------------------------------------------* * uhub_reset_tt_proc * * This function starts the TT reset USB request *------------------------------------------------------------------------*/ #if USB_HAVE_TT_SUPPORT static void uhub_reset_tt_proc(struct usb_proc_msg *_pm) { struct usb_udev_msg *pm = (void *)_pm; struct usb_device *udev = pm->udev; struct usb_hub *hub; struct uhub_softc *sc; hub = udev->hub; if (hub == NULL) return; sc = hub->hubsoftc; if (sc == NULL) return; /* Change lock */ USB_BUS_UNLOCK(udev->bus); USB_MTX_LOCK(&sc->sc_mtx); /* Start transfer */ usbd_transfer_start(sc->sc_xfer[UHUB_RESET_TT_TRANSFER]); /* Change lock */ USB_MTX_UNLOCK(&sc->sc_mtx); USB_BUS_LOCK(udev->bus); } #endif /*------------------------------------------------------------------------* * uhub_tt_buffer_reset_async_locked * * This function queues a TT reset for the given USB device and endpoint. *------------------------------------------------------------------------*/ #if USB_HAVE_TT_SUPPORT void uhub_tt_buffer_reset_async_locked(struct usb_device *child, struct usb_endpoint *ep) { struct usb_device_request req; struct usb_device *udev; struct usb_hub *hub; struct usb_port *up; uint16_t wValue; uint8_t port; if (child == NULL || ep == NULL) return; udev = child->parent_hs_hub; port = child->hs_port_no; if (udev == NULL) return; hub = udev->hub; if ((hub == NULL) || (udev->speed != USB_SPEED_HIGH) || (child->speed != USB_SPEED_LOW && child->speed != USB_SPEED_FULL) || (child->flags.usb_mode != USB_MODE_HOST) || (port == 0) || (ep->edesc == NULL)) { /* not applicable */ return; } USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED); up = hub->ports + port - 1; if (udev->ddesc.bDeviceClass == UDCLASS_HUB && udev->ddesc.bDeviceProtocol == UDPROTO_HSHUBSTT) port = 1; /* if we already received a clear buffer request, reset the whole TT */ if (up->req_reset_tt.bRequest != 0) { req.bmRequestType = UT_WRITE_CLASS_OTHER; req.bRequest = UR_RESET_TT; USETW(req.wValue, 0); req.wIndex[0] = port; req.wIndex[1] = 0; USETW(req.wLength, 0); } else { wValue = (ep->edesc->bEndpointAddress & 0xF) | ((child->address & 0x7F) << 4) | ((ep->edesc->bEndpointAddress & 0x80) << 8) | ((ep->edesc->bmAttributes & 3) << 12); req.bmRequestType = UT_WRITE_CLASS_OTHER; req.bRequest = UR_CLEAR_TT_BUFFER; USETW(req.wValue, wValue); req.wIndex[0] = port; req.wIndex[1] = 0; USETW(req.wLength, 0); } up->req_reset_tt = req; /* get reset transfer started */ usb_proc_msignal(USB_BUS_TT_PROC(udev->bus), &hub->tt_msg[0], &hub->tt_msg[1]); } #endif #if USB_HAVE_TT_SUPPORT static void uhub_reset_tt_callback(struct usb_xfer *xfer, usb_error_t error) { struct uhub_softc *sc; struct usb_device *udev; struct usb_port *up; uint8_t x; DPRINTF("TT buffer reset\n"); sc = usbd_xfer_softc(xfer); udev = sc->sc_udev; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: case USB_ST_SETUP: tr_setup: USB_BUS_LOCK(udev->bus); /* find first port which needs a TT reset */ for (x = 0; x != udev->hub->nports; x++) { up = udev->hub->ports + x; if (up->req_reset_tt.bRequest == 0) continue; /* copy in the transfer */ usbd_copy_in(xfer->frbuffers, 0, &up->req_reset_tt, sizeof(up->req_reset_tt)); /* reset buffer */ memset(&up->req_reset_tt, 0, sizeof(up->req_reset_tt)); /* set length */ usbd_xfer_set_frame_len(xfer, 0, sizeof(up->req_reset_tt)); xfer->nframes = 1; USB_BUS_UNLOCK(udev->bus); usbd_transfer_submit(xfer); 
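/*
 * Only one port is serviced per transfer; the completion
 * callback falls through to tr_setup and rescans the
 * remaining ports.
 */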
return; } USB_BUS_UNLOCK(udev->bus); break; default: if (error == USB_ERR_CANCELLED) break; DPRINTF("TT buffer reset failed (%s)\n", usbd_errstr(error)); goto tr_setup; } } #endif /*------------------------------------------------------------------------* * uhub_count_active_host_ports * * This function counts the number of active ports at the given speed. *------------------------------------------------------------------------*/ uint8_t uhub_count_active_host_ports(struct usb_device *udev, enum usb_dev_speed speed) { struct uhub_softc *sc; struct usb_device *child; struct usb_hub *hub; struct usb_port *up; uint8_t retval = 0; uint8_t x; if (udev == NULL) goto done; hub = udev->hub; if (hub == NULL) goto done; sc = hub->hubsoftc; if (sc == NULL) goto done; for (x = 0; x != hub->nports; x++) { up = hub->ports + x; child = usb_bus_port_get_device(udev->bus, up); if (child != NULL && child->flags.usb_mode == USB_MODE_HOST && child->speed == speed) retval++; } done: return (retval); } void uhub_explore_handle_re_enumerate(struct usb_device *child) { uint8_t do_unlock; usb_error_t err; /* check if device should be re-enumerated */ if (child->flags.usb_mode != USB_MODE_HOST) return; do_unlock = usbd_enum_lock(child); switch (child->re_enumerate_wait) { case USB_RE_ENUM_START: err = usbd_set_config_index(child, USB_UNCONFIG_INDEX); if (err != 0) { DPRINTF("Unconfigure failed: %s: Ignored.\n", usbd_errstr(err)); } if (child->parent_hub == NULL) { /* the root HUB cannot be re-enumerated */ DPRINTFN(6, "cannot reset root HUB\n"); err = 0; } else { err = usbd_req_re_enumerate(child, NULL); } if (err == 0) { /* refresh device strings */ usb_get_langid(child); usb_set_device_strings(child); /* set default configuration */ err = usbd_set_config_index(child, 0); } if (err == 0) { err = usb_probe_and_attach(child, USB_IFACE_INDEX_ANY); } child->re_enumerate_wait = USB_RE_ENUM_DONE; break; case USB_RE_ENUM_PWR_OFF: /* get the device unconfigured */ err = usbd_set_config_index(child, USB_UNCONFIG_INDEX); if (err) { DPRINTFN(0, "Could not unconfigure " "device (ignored)\n"); } if (child->parent_hub == NULL) { /* the root HUB cannot be re-enumerated */ DPRINTFN(6, "cannot set port feature\n"); err = 0; } else { /* clear port enable */ err = usbd_req_clear_port_feature(child->parent_hub, NULL, child->port_no, UHF_PORT_ENABLE); if (err) { DPRINTFN(0, "Could not disable port " "(ignored)\n"); } } child->re_enumerate_wait = USB_RE_ENUM_DONE; break; case USB_RE_ENUM_SET_CONFIG: err = usbd_set_config_index(child, child->next_config_index); if (err != 0) { DPRINTF("Configure failed: %s: Ignored.\n", usbd_errstr(err)); } else { err = usb_probe_and_attach(child, USB_IFACE_INDEX_ANY); } child->re_enumerate_wait = USB_RE_ENUM_DONE; break; default: child->re_enumerate_wait = USB_RE_ENUM_DONE; break; } if (do_unlock) usbd_enum_unlock(child); } /*------------------------------------------------------------------------* * uhub_explore_sub - subroutine * * Return values: * 0: Success * Else: A control transaction failed *------------------------------------------------------------------------*/ static usb_error_t uhub_explore_sub(struct uhub_softc *sc, struct usb_port *up) { struct usb_bus *bus; struct usb_device *child; uint8_t refcount; usb_error_t err; bus = sc->sc_udev->bus; err = USB_ERR_NORMAL_COMPLETION; /* get driver added refcount from USB bus */ refcount = bus->driver_added_refcount; /* get device associated with the given port */ child = usb_bus_port_get_device(bus, up); if (child == NULL) { /* nothing to do */
goto done; } uhub_explore_handle_re_enumerate(child); /* check if probe and attach should be done */ if (child->driver_added_refcount != refcount) { child->driver_added_refcount = refcount; err = usb_probe_and_attach(child, USB_IFACE_INDEX_ANY); if (err) { goto done; } } /* start control transfer, if device mode */ if (child->flags.usb_mode == USB_MODE_DEVICE) usbd_ctrl_transfer_setup(child); /* if a HUB becomes present, do a recursive HUB explore */ if (child->hub) err = (child->hub->explore) (child); done: return (err); } /*------------------------------------------------------------------------* * uhub_read_port_status - factored out code *------------------------------------------------------------------------*/ static usb_error_t uhub_read_port_status(struct uhub_softc *sc, uint8_t portno) { struct usb_port_status ps; usb_error_t err; if (sc->sc_usb_port_errors >= UHUB_USB_PORT_ERRORS_MAX) { DPRINTFN(4, "port %d, HUB looks dead, too many errors\n", portno); sc->sc_st.port_status = 0; sc->sc_st.port_change = 0; return (USB_ERR_TIMEOUT); } err = usbd_req_get_port_status( sc->sc_udev, NULL, &ps, portno); if (err == 0) { sc->sc_st.port_status = UGETW(ps.wPortStatus); sc->sc_st.port_change = UGETW(ps.wPortChange); sc->sc_usb_port_errors = 0; } else { sc->sc_st.port_status = 0; sc->sc_st.port_change = 0; sc->sc_usb_port_errors++; } /* debugging print */ DPRINTFN(4, "port %d, wPortStatus=0x%04x, " "wPortChange=0x%04x, err=%s\n", portno, sc->sc_st.port_status, sc->sc_st.port_change, usbd_errstr(err)); return (err); } /*------------------------------------------------------------------------* * uhub_reattach_port * * Returns: * 0: Success * Else: A control transaction failed *------------------------------------------------------------------------*/ static usb_error_t uhub_reattach_port(struct uhub_softc *sc, uint8_t portno) { struct usb_device *child; struct usb_device *udev; enum usb_dev_speed speed; enum usb_hc_mode mode; usb_error_t err; uint16_t power_mask; uint8_t timeout; DPRINTF("reattaching port %d\n", portno); timeout = 0; udev = sc->sc_udev; child = usb_bus_port_get_device(udev->bus, udev->hub->ports + portno - 1); repeat: /* first clear the port connection change bit */ err = usbd_req_clear_port_feature(udev, NULL, portno, UHF_C_PORT_CONNECTION); if (err) goto error; /* check if there is a child */ if (child != NULL) { /* * Free USB device and all subdevices, if any. 
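* Everything attached below this port is torn down before the
* port is re-enumerated.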
*/ usb_free_device(child, 0); child = NULL; } /* get fresh status */ err = uhub_read_port_status(sc, portno); if (err) goto error; #if USB_HAVE_DISABLE_ENUM /* check if we should skip enumeration from this USB HUB */ if (usb_disable_enumeration != 0 || sc->sc_disable_enumeration != 0) { DPRINTF("Enumeration is disabled!\n"); goto error; } #endif /* check if nothing is connected to the port */ if (!(sc->sc_st.port_status & UPS_CURRENT_CONNECT_STATUS)) goto error; /* check if there is no power on the port and print a warning */ switch (udev->speed) { case USB_SPEED_HIGH: case USB_SPEED_FULL: case USB_SPEED_LOW: power_mask = UPS_PORT_POWER; break; case USB_SPEED_SUPER: if (udev->parent_hub == NULL) power_mask = 0; /* XXX undefined */ else power_mask = UPS_PORT_POWER_SS; break; default: power_mask = 0; break; } if ((sc->sc_st.port_status & power_mask) != power_mask) { DPRINTF("WARNING: strange, connected port %d " "has no power\n", portno); } /* check if the device is in Host Mode */ if (!(sc->sc_st.port_status & UPS_PORT_MODE_DEVICE)) { DPRINTF("Port %d is in Host Mode\n", portno); if (sc->sc_st.port_status & UPS_SUSPEND) { /* * NOTE: Should not get here in SuperSpeed * mode, because the HUB should report this * bit as zero. */ DPRINTF("Port %d was still " "suspended, clearing.\n", portno); err = usbd_req_clear_port_feature(udev, NULL, portno, UHF_PORT_SUSPEND); } /* USB Host Mode */ /* wait for maximum device power up time */ usb_pause_mtx(NULL, USB_MS_TO_TICKS(usb_port_powerup_delay)); /* reset port, which implies enabling it */ err = usbd_req_reset_port(udev, NULL, portno); if (err) { DPRINTFN(0, "port %d reset " "failed, error=%s\n", portno, usbd_errstr(err)); goto error; } /* get port status again, it might have changed during reset */ err = uhub_read_port_status(sc, portno); if (err) { goto error; } /* check if something changed during port reset */ if ((sc->sc_st.port_change & UPS_C_CONNECT_STATUS) || (!(sc->sc_st.port_status & UPS_CURRENT_CONNECT_STATUS))) { if (timeout) { DPRINTFN(1, "giving up port %d reset - " "device vanished: change %#x status %#x\n", portno, sc->sc_st.port_change, sc->sc_st.port_status); goto error; } timeout = 1; goto repeat; } } else { DPRINTF("Port %d is in Device Mode\n", portno); } /* * Figure out the device speed */ switch (udev->speed) { case USB_SPEED_HIGH: if (sc->sc_st.port_status & UPS_HIGH_SPEED) speed = USB_SPEED_HIGH; else if (sc->sc_st.port_status & UPS_LOW_SPEED) speed = USB_SPEED_LOW; else speed = USB_SPEED_FULL; break; case USB_SPEED_FULL: if (sc->sc_st.port_status & UPS_LOW_SPEED) speed = USB_SPEED_LOW; else speed = USB_SPEED_FULL; break; case USB_SPEED_LOW: speed = USB_SPEED_LOW; break; case USB_SPEED_SUPER: if (udev->parent_hub == NULL) { /* Root HUB - special case */ switch (sc->sc_st.port_status & UPS_OTHER_SPEED) { case 0: speed = USB_SPEED_FULL; break; case UPS_LOW_SPEED: speed = USB_SPEED_LOW; break; case UPS_HIGH_SPEED: speed = USB_SPEED_HIGH; break; default: speed = USB_SPEED_SUPER; break; } } else { speed = USB_SPEED_SUPER; } break; default: /* same speed as the parent */ speed = udev->speed; break; } if (speed == USB_SPEED_SUPER) { err = usbd_req_set_hub_u1_timeout(udev, NULL, portno, 128 - (2 * udev->depth)); if (err) { DPRINTFN(0, "port %d U1 timeout " "failed, error=%s\n", portno, usbd_errstr(err)); } err = usbd_req_set_hub_u2_timeout(udev, NULL, portno, 128 - (2 * udev->depth)); if (err) { DPRINTFN(0, "port %d U2 timeout " "failed, error=%s\n", portno, usbd_errstr(err)); } } /* * Figure out the device mode * * NOTE: This part is
currently FreeBSD specific. */ if (udev->parent_hub != NULL) { /* inherit mode from the parent HUB */ mode = udev->parent_hub->flags.usb_mode; } else if (sc->sc_st.port_status & UPS_PORT_MODE_DEVICE) mode = USB_MODE_DEVICE; else mode = USB_MODE_HOST; /* need to create a new child */ child = usb_alloc_device(sc->sc_dev, udev->bus, udev, udev->depth + 1, portno - 1, portno, speed, mode); if (child == NULL) { DPRINTFN(0, "could not allocate new device\n"); goto error; } return (0); /* success */ error: if (child != NULL) { /* * Free USB device and all subdevices, if any. */ usb_free_device(child, 0); child = NULL; } if (err == 0) { if (sc->sc_st.port_status & UPS_PORT_ENABLED) { err = usbd_req_clear_port_feature( sc->sc_udev, NULL, portno, UHF_PORT_ENABLE); } } if (err) { DPRINTFN(0, "device problem (%s), " "disabling port %d\n", usbd_errstr(err), portno); } return (err); } /*------------------------------------------------------------------------* * usb_device_20_compatible * * Returns: * 0: HUB does not support suspend and resume * Else: HUB supports suspend and resume *------------------------------------------------------------------------*/ static uint8_t usb_device_20_compatible(struct usb_device *udev) { if (udev == NULL) return (0); switch (udev->speed) { case USB_SPEED_LOW: case USB_SPEED_FULL: case USB_SPEED_HIGH: return (1); default: return (0); } } /*------------------------------------------------------------------------* * uhub_suspend_resume_port * * Returns: * 0: Success * Else: A control transaction failed *------------------------------------------------------------------------*/ static usb_error_t uhub_suspend_resume_port(struct uhub_softc *sc, uint8_t portno) { struct usb_device *child; struct usb_device *udev; uint8_t is_suspend; usb_error_t err; DPRINTF("port %d\n", portno); udev = sc->sc_udev; child = usb_bus_port_get_device(udev->bus, udev->hub->ports + portno - 1); /* first clear the port suspend change bit */ if (usb_device_20_compatible(udev)) { err = usbd_req_clear_port_feature(udev, NULL, portno, UHF_C_PORT_SUSPEND); } else { err = usbd_req_clear_port_feature(udev, NULL, portno, UHF_C_PORT_LINK_STATE); } if (err) { DPRINTF("clearing suspend failed.\n"); goto done; } /* get fresh status */ err = uhub_read_port_status(sc, portno); if (err) { DPRINTF("reading port status failed.\n"); goto done; } /* convert current state */ if (usb_device_20_compatible(udev)) { if (sc->sc_st.port_status & UPS_SUSPEND) { is_suspend = 1; } else { is_suspend = 0; } } else { switch (UPS_PORT_LINK_STATE_GET(sc->sc_st.port_status)) { case UPS_PORT_LS_U3: is_suspend = 1; break; case UPS_PORT_LS_SS_INA: usbd_req_warm_reset_port(udev, NULL, portno); is_suspend = 0; break; default: is_suspend = 0; break; } } DPRINTF("suspended=%u\n", is_suspend); /* do the suspend or resume */ if (child) { /* * This code handles two cases: 1) Host Mode - we can only * receive resume here 2) Device Mode - we can receive * suspend and resume here */ if (is_suspend == 0) usb_dev_resume_peer(child); else if (child->flags.usb_mode == USB_MODE_DEVICE) usb_dev_suspend_peer(child); } done: return (err); } /*------------------------------------------------------------------------* * uhub_root_intr * * This function is called when a Root HUB interrupt has * happened. "ptr" and "len" make up the Root HUB interrupt * packet. This function is called having the "bus_mtx" locked.
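* The interrupt payload is not parsed here; the explore thread
* woken by usb_needs_explore() re-reads the status of every port.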
*------------------------------------------------------------------------*/ void uhub_root_intr(struct usb_bus *bus, const uint8_t *ptr, uint8_t len) { USB_BUS_LOCK_ASSERT(bus, MA_OWNED); usb_needs_explore(bus, 0); } static uint8_t uhub_is_too_deep(struct usb_device *udev) { switch (udev->speed) { case USB_SPEED_FULL: case USB_SPEED_LOW: case USB_SPEED_HIGH: if (udev->depth > USB_HUB_MAX_DEPTH) return (1); break; case USB_SPEED_SUPER: if (udev->depth > USB_SS_HUB_DEPTH_MAX) return (1); break; default: break; } return (0); } /*------------------------------------------------------------------------* * uhub_explore * * Returns: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ static usb_error_t uhub_explore(struct usb_device *udev) { struct usb_hub *hub; struct uhub_softc *sc; struct usb_port *up; usb_error_t err; uint8_t portno; uint8_t x; uint8_t do_unlock; hub = udev->hub; sc = hub->hubsoftc; DPRINTFN(11, "udev=%p addr=%d\n", udev, udev->address); /* ignore devices that are too deep */ if (uhub_is_too_deep(udev)) return (USB_ERR_TOO_DEEP); /* check if device is suspended */ if (udev->flags.self_suspended) { /* need to wait until the child signals resume */ DPRINTF("Device is suspended!\n"); return (USB_ERR_NORMAL_COMPLETION); } /* * Make sure we don't race against user-space applications * like LibUSB: */ do_unlock = usbd_enum_lock(udev); /* * Set default error code to avoid compiler warnings. * Note that hub->nports cannot be zero. */ err = USB_ERR_NORMAL_COMPLETION; for (x = 0; x != hub->nports; x++) { up = hub->ports + x; portno = x + 1; err = uhub_read_port_status(sc, portno); if (err) { /* most likely the HUB is gone */ break; } if (sc->sc_st.port_change & UPS_C_OVERCURRENT_INDICATOR) { DPRINTF("Overcurrent on port %u.\n", portno); err = usbd_req_clear_port_feature( udev, NULL, portno, UHF_C_PORT_OVER_CURRENT); if (err) { /* most likely the HUB is gone */ break; } } if (!(sc->sc_flags & UHUB_FLAG_DID_EXPLORE)) { /* * Fake a connect status change so that the * status gets checked initially! */ sc->sc_st.port_change |= UPS_C_CONNECT_STATUS; } if (sc->sc_st.port_change & UPS_C_PORT_ENABLED) { err = usbd_req_clear_port_feature( udev, NULL, portno, UHF_C_PORT_ENABLE); if (err) { /* most likely the HUB is gone */ break; } if (sc->sc_st.port_change & UPS_C_CONNECT_STATUS) { /* * Ignore the port error if the device * has vanished ! */ } else if (sc->sc_st.port_status & UPS_PORT_ENABLED) { DPRINTFN(0, "illegal enable change, " "port %d\n", portno); } else { if (up->restartcnt == USB_RESTART_MAX) { /* XXX could try another speed ? 
*/ DPRINTFN(0, "port error, giving up " "port %d\n", portno); } else { sc->sc_st.port_change |= UPS_C_CONNECT_STATUS; up->restartcnt++; } } } if (sc->sc_st.port_change & UPS_C_CONNECT_STATUS) { err = uhub_reattach_port(sc, portno); if (err) { /* most likely the HUB is gone */ break; } } if (sc->sc_st.port_change & (UPS_C_SUSPEND | UPS_C_PORT_LINK_STATE)) { err = uhub_suspend_resume_port(sc, portno); if (err) { /* most likely the HUB is gone */ break; } } if (uhub_explore_sub(sc, up) == USB_ERR_NORMAL_COMPLETION) { /* explore succeeded - reset restart counter */ up->restartcnt = 0; } } if (do_unlock) usbd_enum_unlock(udev); /* initial status checked */ sc->sc_flags |= UHUB_FLAG_DID_EXPLORE; return (err); } int uhub_probe(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); /* * The subclass for USB HUBs is currently ignored because it * is 0 for some and 1 for others. */ if (uaa->info.bConfigIndex == 0 && uaa->info.bDeviceClass == UDCLASS_HUB) return (BUS_PROBE_DEFAULT); return (ENXIO); } /* NOTE: The information returned by this function can be wrong. */ usb_error_t uhub_query_info(struct usb_device *udev, uint8_t *pnports, uint8_t *ptt) { struct usb_hub_descriptor hubdesc20; struct usb_hub_ss_descriptor hubdesc30; usb_error_t err; uint8_t nports; uint8_t tt; if (udev->ddesc.bDeviceClass != UDCLASS_HUB) return (USB_ERR_INVAL); nports = 0; tt = 0; switch (udev->speed) { case USB_SPEED_LOW: case USB_SPEED_FULL: case USB_SPEED_HIGH: /* assuming that there is one port */ err = usbd_req_get_hub_descriptor(udev, NULL, &hubdesc20, 1); if (err) { DPRINTFN(0, "getting USB 2.0 HUB descriptor failed," "error=%s\n", usbd_errstr(err)); break; } nports = hubdesc20.bNbrPorts; if (nports > 127) nports = 127; if (udev->speed == USB_SPEED_HIGH) tt = (UGETW(hubdesc20.wHubCharacteristics) >> 5) & 3; break; case USB_SPEED_SUPER: err = usbd_req_get_ss_hub_descriptor(udev, NULL, &hubdesc30, 1); if (err) { DPRINTFN(0, "Getting USB 3.0 HUB descriptor failed," "error=%s\n", usbd_errstr(err)); break; } nports = hubdesc30.bNbrPorts; if (nports > 16) nports = 16; break; default: err = USB_ERR_INVAL; break; } if (pnports != NULL) *pnports = nports; if (ptt != NULL) *ptt = tt; return (err); } int uhub_attach(device_t dev) { struct uhub_softc *sc = device_get_softc(dev); struct usb_attach_arg *uaa = device_get_ivars(dev); struct usb_device *udev = uaa->device; struct usb_device *parent_hub = udev->parent_hub; struct usb_hub *hub; struct usb_hub_descriptor hubdesc20; struct usb_hub_ss_descriptor hubdesc30; #if USB_HAVE_DISABLE_ENUM struct sysctl_ctx_list *sysctl_ctx; struct sysctl_oid *sysctl_tree; #endif uint16_t pwrdly; uint16_t nports; uint8_t x; uint8_t portno; uint8_t removable; uint8_t iface_index; usb_error_t err; sc->sc_udev = udev; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "USB HUB mutex", NULL, MTX_DEF); device_set_usb_desc(dev); DPRINTFN(2, "depth=%d selfpowered=%d, parent=%p, " "parent->selfpowered=%d\n", udev->depth, udev->flags.self_powered, parent_hub, parent_hub ? parent_hub->flags.self_powered : 0); if (uhub_is_too_deep(udev)) { DPRINTFN(0, "HUB at depth %d, " "exceeds maximum. HUB ignored\n", (int)udev->depth); goto error; } if (!udev->flags.self_powered && parent_hub && !parent_hub->flags.self_powered) { DPRINTFN(0, "Bus powered HUB connected to " "bus powered HUB. 
HUB ignored\n"); goto error; } if (UHUB_IS_MULTI_TT(sc)) { err = usbd_set_alt_interface_index(udev, 0, 1); if (err) { device_printf(dev, "MTT could not be enabled\n"); goto error; } device_printf(dev, "MTT enabled\n"); } /* get HUB descriptor */ DPRINTFN(2, "Getting HUB descriptor\n"); switch (udev->speed) { case USB_SPEED_LOW: case USB_SPEED_FULL: case USB_SPEED_HIGH: /* assuming that there is one port */ err = usbd_req_get_hub_descriptor(udev, NULL, &hubdesc20, 1); if (err) { DPRINTFN(0, "getting USB 2.0 HUB descriptor failed," "error=%s\n", usbd_errstr(err)); goto error; } /* get number of ports */ nports = hubdesc20.bNbrPorts; /* get power delay */ pwrdly = ((hubdesc20.bPwrOn2PwrGood * UHD_PWRON_FACTOR) + usb_extra_power_up_time); /* get complete HUB descriptor */ if (nports >= 8) { /* check number of ports */ if (nports > 127) { DPRINTFN(0, "Invalid number of USB 2.0 ports," "error=%s\n", usbd_errstr(err)); goto error; } /* get complete HUB descriptor */ err = usbd_req_get_hub_descriptor(udev, NULL, &hubdesc20, nports); if (err) { DPRINTFN(0, "Getting USB 2.0 HUB descriptor failed," "error=%s\n", usbd_errstr(err)); goto error; } if (hubdesc20.bNbrPorts != nports) { DPRINTFN(0, "Number of ports changed\n"); goto error; } } break; case USB_SPEED_SUPER: if (udev->parent_hub != NULL) { err = usbd_req_set_hub_depth(udev, NULL, udev->depth - 1); if (err) { DPRINTFN(0, "Setting USB 3.0 HUB depth failed," "error=%s\n", usbd_errstr(err)); goto error; } } err = usbd_req_get_ss_hub_descriptor(udev, NULL, &hubdesc30, 1); if (err) { DPRINTFN(0, "Getting USB 3.0 HUB descriptor failed," "error=%s\n", usbd_errstr(err)); goto error; } /* get number of ports */ nports = hubdesc30.bNbrPorts; /* get power delay */ pwrdly = ((hubdesc30.bPwrOn2PwrGood * UHD_PWRON_FACTOR) + usb_extra_power_up_time); /* get complete HUB descriptor */ if (nports >= 8) { /* check number of ports */ if (nports > ((udev->parent_hub != NULL) ? 
15 : 127)) { DPRINTFN(0, "Invalid number of USB 3.0 ports," "error=%s\n", usbd_errstr(err)); goto error; } /* get complete HUB descriptor */ err = usbd_req_get_ss_hub_descriptor(udev, NULL, &hubdesc30, nports); if (err) { DPRINTFN(0, "Getting USB 3.0 HUB descriptor failed," "error=%s\n", usbd_errstr(err)); goto error; } if (hubdesc30.bNbrPorts != nports) { DPRINTFN(0, "Number of ports changed\n"); goto error; } } break; default: DPRINTF("Assuming HUB has only one port\n"); /* default number of ports */ nports = 1; /* default power delay */ pwrdly = ((10 * UHD_PWRON_FACTOR) + usb_extra_power_up_time); break; } if (nports == 0) { DPRINTFN(0, "portless HUB\n"); goto error; } if (nports > USB_MAX_PORTS) { DPRINTF("Port limit exceeded\n"); goto error; } #if (USB_HAVE_FIXED_PORT == 0) hub = malloc(sizeof(hub[0]) + (sizeof(hub->ports[0]) * nports), M_USBDEV, M_WAITOK | M_ZERO); if (hub == NULL) goto error; #else hub = &sc->sc_hub; #endif udev->hub = hub; /* initialize HUB structure */ hub->hubsoftc = sc; hub->explore = &uhub_explore; hub->nports = nports; hub->hubudev = udev; #if USB_HAVE_TT_SUPPORT hub->tt_msg[0].hdr.pm_callback = &uhub_reset_tt_proc; hub->tt_msg[0].udev = udev; hub->tt_msg[1].hdr.pm_callback = &uhub_reset_tt_proc; hub->tt_msg[1].udev = udev; #endif /* if self powered hub, give ports maximum current */ if (udev->flags.self_powered) { hub->portpower = USB_MAX_POWER; } else { hub->portpower = USB_MIN_POWER; } /* set up interrupt pipe */ iface_index = 0; if (udev->parent_hub == NULL) { /* root HUB is special */ err = 0; } else { /* normal HUB */ err = usbd_transfer_setup(udev, &iface_index, sc->sc_xfer, uhub_config, UHUB_N_TRANSFER, sc, &sc->sc_mtx); } if (err) { DPRINTFN(0, "cannot setup interrupt transfer, " "errstr=%s\n", usbd_errstr(err)); goto error; } /* wait with power off for a while */ usb_pause_mtx(NULL, USB_MS_TO_TICKS(USB_POWER_DOWN_TIME)); #if USB_HAVE_DISABLE_ENUM /* Add device sysctls */ sysctl_ctx = device_get_sysctl_ctx(dev); sysctl_tree = device_get_sysctl_tree(dev); if (sysctl_ctx != NULL && sysctl_tree != NULL) { (void) SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "disable_enumeration", CTLFLAG_RWTUN, &sc->sc_disable_enumeration, 0, "Set to disable enumeration on this USB HUB."); (void) SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "disable_port_power", CTLFLAG_RWTUN, &sc->sc_disable_port_power, 0, "Set to disable USB port power on this USB HUB."); } #endif /* * To have the best chance of success we do things in the exact same * order as Windoze98. This should not be necessary, but some * devices do not follow the USB specs to the letter. * * These are the events on the bus when a hub is attached: * Get device and config descriptors (see attach code) * Get hub descriptor (see above) * For all ports * turn on power * wait for power to become stable * (all below happens in explore code) * For all ports * clear C_PORT_CONNECTION * For all ports * get port status * if device connected * wait 100 ms * turn on reset * wait * clear C_PORT_RESET * get port status * proceed with device attachment */ /* XXX should check for none, individual, or ganged power?
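* Per the USB 2.0 specification, the low two bits of
* wHubCharacteristics report ganged versus individual port power
* switching; the loop below switches each port individually in
* either case.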
*/ removable = 0; for (x = 0; x != nports; x++) { /* set up data structures */ struct usb_port *up = hub->ports + x; up->device_index = 0; up->restartcnt = 0; portno = x + 1; /* check if port is removable */ switch (udev->speed) { case USB_SPEED_LOW: case USB_SPEED_FULL: case USB_SPEED_HIGH: if (!UHD_NOT_REMOV(&hubdesc20, portno)) removable++; break; case USB_SPEED_SUPER: if (!UHD_NOT_REMOV(&hubdesc30, portno)) removable++; break; default: DPRINTF("Assuming removable port\n"); removable++; break; } if (err == 0) { #if USB_HAVE_DISABLE_ENUM /* check if we should disable USB port power or not */ if (usb_disable_port_power != 0 || sc->sc_disable_port_power != 0) { /* turn the power off */ DPRINTFN(2, "Turning port %d power off\n", portno); err = usbd_req_clear_port_feature(udev, NULL, portno, UHF_PORT_POWER); } else { #endif /* turn the power on */ DPRINTFN(2, "Turning port %d power on\n", portno); err = usbd_req_set_port_feature(udev, NULL, portno, UHF_PORT_POWER); #if USB_HAVE_DISABLE_ENUM } #endif } if (err != 0) { DPRINTFN(0, "port %d power on or off failed, %s\n", portno, usbd_errstr(err)); } DPRINTF("turn on port %d power\n", portno); /* wait for stable power */ usb_pause_mtx(NULL, USB_MS_TO_TICKS(pwrdly)); } device_printf(dev, "%d port%s with %d " "removable, %s powered\n", nports, (nports != 1) ? "s" : "", removable, udev->flags.self_powered ? "self" : "bus"); /* Start the interrupt endpoint, if any */ USB_MTX_LOCK(&sc->sc_mtx); usbd_transfer_start(sc->sc_xfer[UHUB_INTR_TRANSFER]); USB_MTX_UNLOCK(&sc->sc_mtx); /* Enable automatic power save on all USB HUBs */ usbd_set_power_mode(udev, USB_POWER_MODE_SAVE); return (0); error: usbd_transfer_unsetup(sc->sc_xfer, UHUB_N_TRANSFER); #if (USB_HAVE_FIXED_PORT == 0) free(udev->hub, M_USBDEV); #endif udev->hub = NULL; mtx_destroy(&sc->sc_mtx); return (ENXIO); } /* * Called from process context when the hub is gone. * Detach all devices on active ports. */ int uhub_detach(device_t dev) { struct uhub_softc *sc = device_get_softc(dev); struct usb_hub *hub = sc->sc_udev->hub; struct usb_bus *bus = sc->sc_udev->bus; struct usb_device *child; uint8_t x; if (hub == NULL) /* must be partially working */ return (0); /* Make sure interrupt transfer is gone. */ usbd_transfer_unsetup(sc->sc_xfer, UHUB_N_TRANSFER); /* Detach all ports */ for (x = 0; x != hub->nports; x++) { child = usb_bus_port_get_device(bus, hub->ports + x); if (child == NULL) { continue; } /* * Free USB device and all subdevices, if any. */ usb_free_device(child, 0); } #if USB_HAVE_TT_SUPPORT /* Make sure our TT messages are not queued anywhere */ USB_BUS_LOCK(bus); usb_proc_mwait(USB_BUS_TT_PROC(bus), &hub->tt_msg[0], &hub->tt_msg[1]); USB_BUS_UNLOCK(bus); #endif #if (USB_HAVE_FIXED_PORT == 0) free(hub, M_USBDEV); #endif sc->sc_udev->hub = NULL; mtx_destroy(&sc->sc_mtx); return (0); } static int uhub_suspend(device_t dev) { DPRINTF("\n"); /* Sub-devices are not suspended here! */ return (0); } static int uhub_resume(device_t dev) { DPRINTF("\n"); /* Sub-devices are not resumed here! 
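* Their resume is driven separately by the explore thread and the
* power-save logic, see usb_dev_resume_peer().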
*/ return (0); } static void uhub_driver_added(device_t dev, driver_t *driver) { usb_needs_explore_all(); } void uhub_find_iface_index(struct usb_hub *hub, device_t child, struct hub_result *res) { struct usb_interface *iface; struct usb_device *udev; uint8_t nports; uint8_t x; uint8_t i; nports = hub->nports; for (x = 0; x != nports; x++) { udev = usb_bus_port_get_device(hub->hubudev->bus, hub->ports + x); if (!udev) { continue; } for (i = 0; i != USB_IFACE_MAX; i++) { iface = usbd_get_iface(udev, i); if (iface && (iface->subdev == child)) { res->iface_index = i; res->udev = udev; res->portno = x + 1; return; } } } res->iface_index = 0; res->udev = NULL; res->portno = 0; } int -uhub_child_location_string(device_t parent, device_t child, - char *buf, size_t buflen) +uhub_child_location(device_t parent, device_t child, struct sbuf *sb) { struct uhub_softc *sc; struct usb_hub *hub; struct hub_result res; - if (!device_is_attached(parent)) { - if (buflen) - buf[0] = 0; + if (!device_is_attached(parent)) return (0); - } sc = device_get_softc(parent); hub = sc->sc_udev->hub; mtx_lock(&Giant); uhub_find_iface_index(hub, child, &res); if (!res.udev) { DPRINTF("device not on hub\n"); - if (buflen) { - buf[0] = '\0'; - } goto done; } - snprintf(buf, buflen, "bus=%u hubaddr=%u port=%u devaddr=%u" + sbuf_printf(sb, "bus=%u hubaddr=%u port=%u devaddr=%u" " interface=%u" #if USB_HAVE_UGEN " ugen=%s" #endif , device_get_unit(res.udev->bus->bdev) , (res.udev->parent_hub != NULL) ? res.udev->parent_hub->device_index : 0 , res.portno, res.udev->device_index, res.iface_index #if USB_HAVE_UGEN , res.udev->ugen_name #endif ); done: mtx_unlock(&Giant); return (0); } static int -uhub_child_pnpinfo_string(device_t parent, device_t child, - char *buf, size_t buflen) +uhub_child_pnpinfo(device_t parent, device_t child, struct sbuf*sb) { struct uhub_softc *sc; struct usb_hub *hub; struct usb_interface *iface; struct hub_result res; uint8_t do_unlock; - if (!device_is_attached(parent)) { - if (buflen) - buf[0] = 0; + if (!device_is_attached(parent)) return (0); - } sc = device_get_softc(parent); hub = sc->sc_udev->hub; mtx_lock(&Giant); uhub_find_iface_index(hub, child, &res); if (!res.udev) { DPRINTF("device not on hub\n"); - if (buflen) { - buf[0] = '\0'; - } goto done; } iface = usbd_get_iface(res.udev, res.iface_index); if (iface && iface->idesc) { /* Make sure device information is not changed during the print. */ do_unlock = usbd_ctrl_lock(res.udev); - snprintf(buf, buflen, "vendor=0x%04x product=0x%04x " + sbuf_printf(sb, "vendor=0x%04x product=0x%04x " "devclass=0x%02x devsubclass=0x%02x " "devproto=0x%02x " "sernum=\"%s\" " "release=0x%04x " "mode=%s " "intclass=0x%02x intsubclass=0x%02x " "intprotocol=0x%02x" "%s%s", UGETW(res.udev->ddesc.idVendor), UGETW(res.udev->ddesc.idProduct), res.udev->ddesc.bDeviceClass, res.udev->ddesc.bDeviceSubClass, res.udev->ddesc.bDeviceProtocol, usb_get_serial(res.udev), UGETW(res.udev->ddesc.bcdDevice), (res.udev->flags.usb_mode == USB_MODE_HOST) ? "host" : "device", iface->idesc->bInterfaceClass, iface->idesc->bInterfaceSubClass, iface->idesc->bInterfaceProtocol, iface->pnpinfo ? " " : "", iface->pnpinfo ? 
iface->pnpinfo : ""); if (do_unlock) usbd_ctrl_unlock(res.udev); - } else { - if (buflen) { - buf[0] = '\0'; - } - goto done; } done: mtx_unlock(&Giant); return (0); } /* * The USB Transaction Translator: * =============================== * * When doing LOW- and FULL-speed USB transfers across a HIGH-speed * USB HUB, bandwidth must be allocated for ISOCHRONOUS and INTERRUPT * USB transfers. To utilize bandwidth dynamically the "scatter and * gather" principle must be applied. This means that bandwidth must * be divided into equal parts of bandwidth. With regard to USB all * data is transferred in smaller packets with length * "wMaxPacketSize". The problem however is that "wMaxPacketSize" is * not a constant! * * The bandwidth scheduler which I have implemented will simply pack * the USB transfers back to back until there is no more space in the * schedule. Out of the 8 microframes which the USB 2.0 standard * provides, only 6 are available for non-HIGH-speed devices. I have * reserved the first 4 microframes for ISOCHRONOUS transfers. The * last 2 microframes I have reserved for INTERRUPT transfers. Without * this division, it is very difficult to allocate and free bandwidth * dynamically. * * NOTE about the Transaction Translator in USB HUBs: * * USB HUBs have a very simple Transaction Translator, that will * simply pipeline all the SPLIT transactions. That means that the * transactions will be executed in the order they are queued! * */ /*------------------------------------------------------------------------* * usb_intr_find_best_slot * * Return value: * The best Transaction Translation slot for an interrupt endpoint. *------------------------------------------------------------------------*/ static uint8_t usb_intr_find_best_slot(usb_size_t *ptr, uint8_t start, uint8_t end, uint8_t mask) { usb_size_t min = (usb_size_t)-1; usb_size_t sum; uint8_t x; uint8_t y; uint8_t z; y = 0; /* find the last slot with lesser used bandwidth */ for (x = start; x < end; x++) { sum = 0; /* compute sum of bandwidth */ for (z = x; z < end; z++) { if (mask & (1U << (z - x))) sum += ptr[z]; } /* check if the current multi-slot is more optimal */ if (min >= sum) { min = sum; y = x; } /* check if the mask is about to be shifted out */ if (mask & (1U << (end - 1 - x))) break; } return (y); } /*------------------------------------------------------------------------* * usb_hs_bandwidth_adjust * * This function will update the bandwidth usage for the microframe * having index "slot" by "len" bytes. "len" can be negative. If the * "slot" argument is greater or equal to "USB_HS_MICRO_FRAMES_MAX" * the "slot" argument will be replaced by the slot having least used * bandwidth. The "mask" argument is used for multi-slot allocations. * * Returns: * The slot in which the bandwidth update was done: 0..7 *------------------------------------------------------------------------*/ static uint8_t usb_hs_bandwidth_adjust(struct usb_device *udev, int16_t len, uint8_t slot, uint8_t mask) { struct usb_bus *bus = udev->bus; struct usb_hub *hub; enum usb_dev_speed speed; uint8_t x; USB_BUS_LOCK_ASSERT(bus, MA_OWNED); speed = usbd_get_speed(udev); switch (speed) { case USB_SPEED_LOW: case USB_SPEED_FULL: if (speed == USB_SPEED_LOW) { len *= 8; } /* * The Host Controller Driver should have * performed checks so that the lookup * below does not result in a NULL pointer * access. 
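* "parent_hs_hub" is the HIGH-speed hub whose transaction
* translator carries this LOW- or FULL-speed device. The factor of
* eight applied to "len" above reflects that LOW-speed signalling
* is eight times slower than FULL-speed (1.5 versus 12 Mbit/s).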
*/ hub = udev->parent_hs_hub->hub; if (slot >= USB_HS_MICRO_FRAMES_MAX) { slot = usb_intr_find_best_slot(hub->uframe_usage, USB_FS_ISOC_UFRAME_MAX, 6, mask); } for (x = slot; x < 8; x++) { if (mask & (1U << (x - slot))) { hub->uframe_usage[x] += len; bus->uframe_usage[x] += len; } } break; default: if (slot >= USB_HS_MICRO_FRAMES_MAX) { slot = usb_intr_find_best_slot(bus->uframe_usage, 0, USB_HS_MICRO_FRAMES_MAX, mask); } for (x = slot; x < 8; x++) { if (mask & (1U << (x - slot))) { bus->uframe_usage[x] += len; } } break; } return (slot); } /*------------------------------------------------------------------------* * usb_hs_bandwidth_alloc * * This function is a wrapper function for "usb_hs_bandwidth_adjust()". *------------------------------------------------------------------------*/ void usb_hs_bandwidth_alloc(struct usb_xfer *xfer) { struct usb_device *udev; uint8_t slot; uint8_t mask; uint8_t speed; udev = xfer->xroot->udev; if (udev->flags.usb_mode != USB_MODE_HOST) return; /* not supported */ xfer->endpoint->refcount_bw++; if (xfer->endpoint->refcount_bw != 1) return; /* already allocated */ speed = usbd_get_speed(udev); switch (xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE) { case UE_INTERRUPT: /* allocate a microframe slot */ mask = 0x01; slot = usb_hs_bandwidth_adjust(udev, xfer->max_frame_size, USB_HS_MICRO_FRAMES_MAX, mask); xfer->endpoint->usb_uframe = slot; xfer->endpoint->usb_smask = mask << slot; if ((speed != USB_SPEED_FULL) && (speed != USB_SPEED_LOW)) { xfer->endpoint->usb_cmask = 0x00 ; } else { xfer->endpoint->usb_cmask = (-(0x04 << slot)) & 0xFE; } break; case UE_ISOCHRONOUS: switch (usbd_xfer_get_fps_shift(xfer)) { case 0: mask = 0xFF; break; case 1: mask = 0x55; break; case 2: mask = 0x11; break; default: mask = 0x01; break; } /* allocate a microframe multi-slot */ slot = usb_hs_bandwidth_adjust(udev, xfer->max_frame_size, USB_HS_MICRO_FRAMES_MAX, mask); xfer->endpoint->usb_uframe = slot; xfer->endpoint->usb_cmask = 0; xfer->endpoint->usb_smask = mask << slot; break; default: xfer->endpoint->usb_uframe = 0; xfer->endpoint->usb_cmask = 0; xfer->endpoint->usb_smask = 0; break; } DPRINTFN(11, "slot=%d, mask=0x%02x\n", xfer->endpoint->usb_uframe, xfer->endpoint->usb_smask >> xfer->endpoint->usb_uframe); } /*------------------------------------------------------------------------* * usb_hs_bandwidth_free * * This function is a wrapper function for "usb_hs_bandwidth_adjust()". *------------------------------------------------------------------------*/ void usb_hs_bandwidth_free(struct usb_xfer *xfer) { struct usb_device *udev; uint8_t slot; uint8_t mask; udev = xfer->xroot->udev; if (udev->flags.usb_mode != USB_MODE_HOST) return; /* not supported */ xfer->endpoint->refcount_bw--; if (xfer->endpoint->refcount_bw != 0) return; /* still allocated */ switch (xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE) { case UE_INTERRUPT: case UE_ISOCHRONOUS: slot = xfer->endpoint->usb_uframe; mask = xfer->endpoint->usb_smask; /* free microframe slot(s): */ usb_hs_bandwidth_adjust(udev, -xfer->max_frame_size, slot, mask >> slot); DPRINTFN(11, "slot=%d, mask=0x%02x\n", slot, mask >> slot); xfer->endpoint->usb_uframe = 0; xfer->endpoint->usb_cmask = 0; xfer->endpoint->usb_smask = 0; break; default: break; } } /*------------------------------------------------------------------------* * usb_isoc_time_expand * * This function will expand the time counter from 7-bit to 16-bit. * * Returns: * 16-bit isochronous time counter. 
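* Worked example, assuming USB_ISOC_TIME_MAX is 128: if the last
* expanded value was 0x239 (7-bit remainder 0x39) and the
* controller now reports 0x02, the counter must have wrapped, so
* 128 is added and the low bits are replaced, giving 0x282.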
*------------------------------------------------------------------------*/ uint16_t usb_isoc_time_expand(struct usb_bus *bus, uint16_t isoc_time_curr) { uint16_t rem; USB_BUS_LOCK_ASSERT(bus, MA_OWNED); rem = bus->isoc_time_last & (USB_ISOC_TIME_MAX - 1); isoc_time_curr &= (USB_ISOC_TIME_MAX - 1); if (isoc_time_curr < rem) { /* the time counter wrapped around */ bus->isoc_time_last += USB_ISOC_TIME_MAX; } /* update the remainder */ bus->isoc_time_last &= ~(USB_ISOC_TIME_MAX - 1); bus->isoc_time_last |= isoc_time_curr; return (bus->isoc_time_last); } /*------------------------------------------------------------------------* * usbd_fs_isoc_schedule_alloc_slot * * This function will allocate bandwidth for an isochronous FULL speed * transaction in the FULL speed schedule. * * Returns: * <8: Success * Else: Error *------------------------------------------------------------------------*/ #if USB_HAVE_TT_SUPPORT uint8_t usbd_fs_isoc_schedule_alloc_slot(struct usb_xfer *isoc_xfer, uint16_t isoc_time) { struct usb_xfer *xfer; struct usb_xfer *pipe_xfer; struct usb_bus *bus; usb_frlength_t len; usb_frlength_t data_len; uint16_t delta; uint16_t slot; uint8_t retval; data_len = 0; slot = 0; bus = isoc_xfer->xroot->bus; TAILQ_FOREACH(xfer, &bus->intr_q.head, wait_entry) { /* skip self, if any */ if (xfer == isoc_xfer) continue; /* check if this USB transfer is going through the same TT */ if (xfer->xroot->udev->parent_hs_hub != isoc_xfer->xroot->udev->parent_hs_hub) { continue; } if ((isoc_xfer->xroot->udev->parent_hs_hub-> ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT) && (xfer->xroot->udev->hs_port_no != isoc_xfer->xroot->udev->hs_port_no)) { continue; } if (xfer->endpoint->methods != isoc_xfer->endpoint->methods) continue; /* check if isoc_time is part of this transfer */ delta = xfer->isoc_time_complete - isoc_time; if (delta > 0 && delta <= xfer->nframes) { delta = xfer->nframes - delta; len = xfer->frlengths[delta]; len += 8; len *= 7; len /= 6; data_len += len; } /* * Check double buffered transfers. Only stream ID * equal to zero is valid here! */ TAILQ_FOREACH(pipe_xfer, &xfer->endpoint->endpoint_q[0].head, wait_entry) { /* skip self, if any */ if (pipe_xfer == isoc_xfer) continue; /* check if isoc_time is part of this transfer */ delta = pipe_xfer->isoc_time_complete - isoc_time; if (delta > 0 && delta <= pipe_xfer->nframes) { delta = pipe_xfer->nframes - delta; len = pipe_xfer->frlengths[delta]; len += 8; len *= 7; len /= 6; data_len += len; } } } while (data_len >= USB_FS_BYTES_PER_HS_UFRAME) { data_len -= USB_FS_BYTES_PER_HS_UFRAME; slot++; } /* check for overflow */ if (slot >= USB_FS_ISOC_UFRAME_MAX) return (255); retval = slot; delta = isoc_xfer->isoc_time_complete - isoc_time; if (delta > 0 && delta <= isoc_xfer->nframes) { delta = isoc_xfer->nframes - delta; len = isoc_xfer->frlengths[delta]; len += 8; len *= 7; len /= 6; data_len += len; } while (data_len >= USB_FS_BYTES_PER_HS_UFRAME) { data_len -= USB_FS_BYTES_PER_HS_UFRAME; slot++; } /* check for overflow */ if (slot >= USB_FS_ISOC_UFRAME_MAX) return (255); return (retval); } #endif /*------------------------------------------------------------------------* * usb_bus_port_get_device * * This function is NULL safe. 
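* Both "bus" and "up" may be NULL, and a "device_index" of zero
* means that no device is currently attached to the port.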
*------------------------------------------------------------------------*/ struct usb_device * usb_bus_port_get_device(struct usb_bus *bus, struct usb_port *up) { if ((bus == NULL) || (up == NULL)) { /* be NULL safe */ return (NULL); } if (up->device_index == 0) { /* nothing to do */ return (NULL); } return (bus->devices[up->device_index]); } /*------------------------------------------------------------------------* * usb_bus_port_set_device * * This function is NULL safe. *------------------------------------------------------------------------*/ void usb_bus_port_set_device(struct usb_bus *bus, struct usb_port *up, struct usb_device *udev, uint8_t device_index) { if (bus == NULL) { /* be NULL safe */ return; } /* * There is only one case where we don't * have an USB port, and that is the Root Hub! */ if (up) { if (udev) { up->device_index = device_index; } else { device_index = up->device_index; up->device_index = 0; } } /* * Make relationships to our new device */ if (device_index != 0) { #if USB_HAVE_UGEN mtx_lock(&usb_ref_lock); #endif bus->devices[device_index] = udev; #if USB_HAVE_UGEN mtx_unlock(&usb_ref_lock); #endif } /* * Debug print */ DPRINTFN(2, "bus %p devices[%u] = %p\n", bus, device_index, udev); } /*------------------------------------------------------------------------* * usb_needs_explore * * This function is called when the USB event thread needs to run. *------------------------------------------------------------------------*/ void usb_needs_explore(struct usb_bus *bus, uint8_t do_probe) { uint8_t do_unlock; DPRINTF("\n"); if (cold != 0) { DPRINTF("Cold\n"); return; } if (bus == NULL) { DPRINTF("No bus pointer!\n"); return; } if ((bus->devices == NULL) || (bus->devices[USB_ROOT_HUB_ADDR] == NULL)) { DPRINTF("No root HUB\n"); return; } if (mtx_owned(&bus->bus_mtx)) { do_unlock = 0; } else { USB_BUS_LOCK(bus); do_unlock = 1; } if (do_probe) { bus->do_probe = 1; } if (usb_proc_msignal(USB_BUS_EXPLORE_PROC(bus), &bus->explore_msg[0], &bus->explore_msg[1])) { /* ignore */ } if (do_unlock) { USB_BUS_UNLOCK(bus); } } /*------------------------------------------------------------------------* * usb_needs_explore_all * * This function is called whenever a new driver is loaded and * causes all USB buses to be re-explored. *------------------------------------------------------------------------*/ void usb_needs_explore_all(void) { struct usb_bus *bus; devclass_t dc; device_t dev; int max; DPRINTFN(3, "\n"); dc = usb_devclass_ptr; if (dc == NULL) { DPRINTFN(0, "no devclass\n"); return; } /* * Explore all USB buses in parallel. */ max = devclass_get_maxunit(dc); while (max >= 0) { dev = devclass_get_device(dc, max); if (dev) { bus = device_get_softc(dev); if (bus) { usb_needs_explore(bus, 1); } } max--; } } /*------------------------------------------------------------------------* * usb_needs_explore_init * * This function will ensure that the USB controllers are not enumerated * until the "cold" variable is cleared.
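* It is registered as a SYSINIT at SI_SUB_KICK_SCHEDULER, which
* runs after the scheduler has started and "cold" has been
* cleared, so the initial bus explore happens exactly once.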
*------------------------------------------------------------------------*/ static void usb_needs_explore_init(void *arg) { /* * The cold variable should be cleared prior to this function * being called: */ if (cold == 0) usb_needs_explore_all(); else DPRINTFN(-1, "Cold variable is still set!\n"); } SYSINIT(usb_needs_explore_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, usb_needs_explore_init, NULL); /*------------------------------------------------------------------------* * usb_bus_power_update * * This function will ensure that all USB devices on the given bus are * properly suspended or resumed according to the device transfer * state. *------------------------------------------------------------------------*/ #if USB_HAVE_POWERD void usb_bus_power_update(struct usb_bus *bus) { usb_needs_explore(bus, 0 /* no probe */ ); } #endif /*------------------------------------------------------------------------* * usbd_transfer_power_ref * * This function will modify the power save reference counts and * wakeup the USB device associated with the given USB transfer, if * needed. *------------------------------------------------------------------------*/ #if USB_HAVE_POWERD void usbd_transfer_power_ref(struct usb_xfer *xfer, int val) { static const usb_power_mask_t power_mask[4] = { [UE_CONTROL] = USB_HW_POWER_CONTROL, [UE_BULK] = USB_HW_POWER_BULK, [UE_INTERRUPT] = USB_HW_POWER_INTERRUPT, [UE_ISOCHRONOUS] = USB_HW_POWER_ISOC, }; struct usb_device *udev; uint8_t needs_explore; uint8_t needs_hw_power; uint8_t xfer_type; udev = xfer->xroot->udev; if (udev->device_index == USB_ROOT_HUB_ADDR) { /* no power save for root HUB */ return; } USB_BUS_LOCK(udev->bus); xfer_type = xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE; udev->pwr_save.last_xfer_time = ticks; udev->pwr_save.type_refs[xfer_type] += val; if (xfer->flags_int.control_xfr) { udev->pwr_save.read_refs += val; if (xfer->flags_int.usb_mode == USB_MODE_HOST) { /* * It is not allowed to suspend during a * control transfer: */ udev->pwr_save.write_refs += val; } } else if (USB_GET_DATA_ISREAD(xfer)) { udev->pwr_save.read_refs += val; } else { udev->pwr_save.write_refs += val; } if (val > 0) { if (udev->flags.self_suspended) needs_explore = usb_peer_should_wakeup(udev); else needs_explore = 0; if (!(udev->bus->hw_power_state & power_mask[xfer_type])) { DPRINTF("Adding type %u to power state\n", xfer_type); udev->bus->hw_power_state |= power_mask[xfer_type]; needs_hw_power = 1; } else { needs_hw_power = 0; } } else { needs_explore = 0; needs_hw_power = 0; } USB_BUS_UNLOCK(udev->bus); if (needs_explore) { DPRINTF("update\n"); usb_bus_power_update(udev->bus); } else if (needs_hw_power) { DPRINTF("needs power\n"); if (udev->bus->methods->set_hw_power != NULL) { (udev->bus->methods->set_hw_power) (udev->bus); } } } #endif /*------------------------------------------------------------------------* * usb_peer_should_wakeup * * This function returns non-zero if the current device should wake up. 
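* This is the case when the device is forced on in host mode, when
* new drivers have been loaded, when a re-enumeration or
* configuration change is pending, or when transfers are
* outstanding which cannot survive a suspend.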
*------------------------------------------------------------------------*/ static uint8_t usb_peer_should_wakeup(struct usb_device *udev) { return (((udev->power_mode == USB_POWER_MODE_ON) && (udev->flags.usb_mode == USB_MODE_HOST)) || (udev->driver_added_refcount != udev->bus->driver_added_refcount) || (udev->re_enumerate_wait != USB_RE_ENUM_DONE) || (udev->pwr_save.type_refs[UE_ISOCHRONOUS] != 0) || (udev->pwr_save.write_refs != 0) || ((udev->pwr_save.read_refs != 0) && (udev->flags.usb_mode == USB_MODE_HOST) && (usb_peer_can_wakeup(udev) == 0))); } /*------------------------------------------------------------------------* * usb_bus_powerd * * This function implements the USB power daemon and is called * regularly from the USB explore thread. *------------------------------------------------------------------------*/ #if USB_HAVE_POWERD void usb_bus_powerd(struct usb_bus *bus) { struct usb_device *udev; usb_ticks_t temp; usb_ticks_t limit; usb_ticks_t mintime; usb_size_t type_refs[5]; uint8_t x; limit = usb_power_timeout; if (limit == 0) limit = hz; else if (limit > 255) limit = 255 * hz; else limit = limit * hz; DPRINTF("bus=%p\n", bus); USB_BUS_LOCK(bus); /* * The root HUB device is never suspended * and we simply skip it. */ for (x = USB_ROOT_HUB_ADDR + 1; x != bus->devices_max; x++) { udev = bus->devices[x]; if (udev == NULL) continue; temp = ticks - udev->pwr_save.last_xfer_time; if (usb_peer_should_wakeup(udev)) { /* check if we are suspended */ if (udev->flags.self_suspended != 0) { USB_BUS_UNLOCK(bus); usb_dev_resume_peer(udev); USB_BUS_LOCK(bus); } } else if ((temp >= limit) && (udev->flags.usb_mode == USB_MODE_HOST) && (udev->flags.self_suspended == 0)) { /* try to do suspend */ USB_BUS_UNLOCK(bus); usb_dev_suspend_peer(udev); USB_BUS_LOCK(bus); } } /* reset counters */ mintime = (usb_ticks_t)-1; type_refs[0] = 0; type_refs[1] = 0; type_refs[2] = 0; type_refs[3] = 0; type_refs[4] = 0; /* Re-loop all the devices to get the actual state */ for (x = USB_ROOT_HUB_ADDR + 1; x != bus->devices_max; x++) { udev = bus->devices[x]; if (udev == NULL) continue; /* we found a non-Root-Hub USB device */ type_refs[4] += 1; /* "last_xfer_time" can be updated by a resume */ temp = ticks - udev->pwr_save.last_xfer_time; /* * Compute minimum time since last transfer for the complete * bus: */ if (temp < mintime) mintime = temp; if (udev->flags.self_suspended == 0) { type_refs[0] += udev->pwr_save.type_refs[0]; type_refs[1] += udev->pwr_save.type_refs[1]; type_refs[2] += udev->pwr_save.type_refs[2]; type_refs[3] += udev->pwr_save.type_refs[3]; } } if (mintime >= (usb_ticks_t)(1 * hz)) { /* recompute power masks */ DPRINTF("Recomputing power masks\n"); bus->hw_power_state = 0; if (type_refs[UE_CONTROL] != 0) bus->hw_power_state |= USB_HW_POWER_CONTROL; if (type_refs[UE_BULK] != 0) bus->hw_power_state |= USB_HW_POWER_BULK; if (type_refs[UE_INTERRUPT] != 0) bus->hw_power_state |= USB_HW_POWER_INTERRUPT; if (type_refs[UE_ISOCHRONOUS] != 0) bus->hw_power_state |= USB_HW_POWER_ISOC; if (type_refs[4] != 0) bus->hw_power_state |= USB_HW_POWER_NON_ROOT_HUB; } USB_BUS_UNLOCK(bus); if (bus->methods->set_hw_power != NULL) { /* always update hardware power! 
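* The recomputed "hw_power_state" mask tells the controller driver
* which transfer types still have active users, so that it may
* gate unused clocks.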
*/ (bus->methods->set_hw_power) (bus); } return; } #endif static usb_error_t usbd_device_30_remote_wakeup(struct usb_device *udev, uint8_t bRequest) { struct usb_device_request req = {}; req.bmRequestType = UT_WRITE_INTERFACE; req.bRequest = bRequest; USETW(req.wValue, USB_INTERFACE_FUNC_SUSPEND); USETW(req.wIndex, USB_INTERFACE_FUNC_SUSPEND_LP | USB_INTERFACE_FUNC_SUSPEND_RW); return (usbd_do_request(udev, NULL, &req, 0)); } static usb_error_t usbd_clear_dev_wakeup(struct usb_device *udev) { usb_error_t err; if (usb_device_20_compatible(udev)) { err = usbd_req_clear_device_feature(udev, NULL, UF_DEVICE_REMOTE_WAKEUP); } else { err = usbd_device_30_remote_wakeup(udev, UR_CLEAR_FEATURE); } return (err); } static usb_error_t usbd_set_dev_wakeup(struct usb_device *udev) { usb_error_t err; if (usb_device_20_compatible(udev)) { err = usbd_req_set_device_feature(udev, NULL, UF_DEVICE_REMOTE_WAKEUP); } else { err = usbd_device_30_remote_wakeup(udev, UR_SET_FEATURE); } return (err); } /*------------------------------------------------------------------------* * usb_dev_resume_peer * * This function will resume an USB peer and do the required USB * signalling to get an USB device out of the suspended state. *------------------------------------------------------------------------*/ static void usb_dev_resume_peer(struct usb_device *udev) { struct usb_bus *bus; int err; /* be NULL safe */ if (udev == NULL) return; /* check if already resumed */ if (udev->flags.self_suspended == 0) return; /* we need a parent HUB to do resume */ if (udev->parent_hub == NULL) return; DPRINTF("udev=%p\n", udev); if ((udev->flags.usb_mode == USB_MODE_DEVICE) && (udev->flags.remote_wakeup == 0)) { /* * If the host did not set the remote wakeup feature, we can * not wake it up either! */ DPRINTF("remote wakeup is not set!\n"); return; } /* get bus pointer */ bus = udev->bus; /* resume parent hub first */ usb_dev_resume_peer(udev->parent_hub); /* reduce chance of instant resume failure by waiting a little bit */ usb_pause_mtx(NULL, USB_MS_TO_TICKS(20)); if (usb_device_20_compatible(udev)) { /* resume current port (Valid in Host and Device Mode) */ err = usbd_req_clear_port_feature(udev->parent_hub, NULL, udev->port_no, UHF_PORT_SUSPEND); if (err) { DPRINTFN(0, "Resuming port failed\n"); return; } } else { /* resume current port (Valid in Host and Device Mode) */ err = usbd_req_set_port_link_state(udev->parent_hub, NULL, udev->port_no, UPS_PORT_LS_U0); if (err) { DPRINTFN(0, "Resuming port failed\n"); return; } } /* resume settle time */ usb_pause_mtx(NULL, USB_MS_TO_TICKS(usb_port_resume_delay)); if (bus->methods->device_resume != NULL) { /* resume USB device on the USB controller */ (bus->methods->device_resume) (udev); } USB_BUS_LOCK(bus); /* set that this device is now resumed */ udev->flags.self_suspended = 0; #if USB_HAVE_POWERD /* make sure that we don't go into suspend right away */ udev->pwr_save.last_xfer_time = ticks; /* make sure the needed power masks are on */ if (udev->pwr_save.type_refs[UE_CONTROL] != 0) bus->hw_power_state |= USB_HW_POWER_CONTROL; if (udev->pwr_save.type_refs[UE_BULK] != 0) bus->hw_power_state |= USB_HW_POWER_BULK; if (udev->pwr_save.type_refs[UE_INTERRUPT] != 0) bus->hw_power_state |= USB_HW_POWER_INTERRUPT; if (udev->pwr_save.type_refs[UE_ISOCHRONOUS] != 0) bus->hw_power_state |= USB_HW_POWER_ISOC; #endif USB_BUS_UNLOCK(bus); if (bus->methods->set_hw_power != NULL) { /* always update hardware power! 
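* Once the hardware power state is restored, all sub-devices are
* notified about the resume below, under the suspend/resume lock.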
*/ (bus->methods->set_hw_power) (bus); } usbd_sr_lock(udev); /* notify all sub-devices about resume */ err = usb_suspend_resume(udev, 0); usbd_sr_unlock(udev); /* check if peer has wakeup capability */ if (usb_peer_can_wakeup(udev)) { /* clear remote wakeup */ err = usbd_clear_dev_wakeup(udev); if (err) { DPRINTFN(0, "Clearing device " "remote wakeup failed: %s\n", usbd_errstr(err)); } } } /*------------------------------------------------------------------------* * usb_dev_suspend_peer * * This function will suspend an USB peer and do the required USB * signalling to get an USB device into the suspended state. *------------------------------------------------------------------------*/ static void usb_dev_suspend_peer(struct usb_device *udev) { struct usb_device *child; int err; uint8_t x; uint8_t nports; repeat: /* be NULL safe */ if (udev == NULL) return; /* check if already suspended */ if (udev->flags.self_suspended) return; /* we need a parent HUB to do suspend */ if (udev->parent_hub == NULL) return; DPRINTF("udev=%p\n", udev); /* check if the current device is a HUB */ if (udev->hub != NULL) { nports = udev->hub->nports; /* check if all devices on the HUB are suspended */ for (x = 0; x != nports; x++) { child = usb_bus_port_get_device(udev->bus, udev->hub->ports + x); if (child == NULL) continue; if (child->flags.self_suspended) continue; DPRINTFN(1, "Port %u is busy on the HUB!\n", x + 1); return; } } if (usb_peer_can_wakeup(udev)) { /* * This request needs to be done before we set * "udev->flags.self_suspended": */ /* allow device to do remote wakeup */ err = usbd_set_dev_wakeup(udev); if (err) { DPRINTFN(0, "Setting device " "remote wakeup failed\n"); } } USB_BUS_LOCK(udev->bus); /* * Checking for suspend condition and setting suspended bit * must be atomic! */ err = usb_peer_should_wakeup(udev); if (err == 0) { /* * Set that this device is suspended. This variable * must be set before calling USB controller suspend * callbacks. 
*/ udev->flags.self_suspended = 1; } USB_BUS_UNLOCK(udev->bus); if (err != 0) { if (usb_peer_can_wakeup(udev)) { /* allow device to do remote wakeup */ err = usbd_clear_dev_wakeup(udev); if (err) { DPRINTFN(0, "Setting device " "remote wakeup failed\n"); } } if (udev->flags.usb_mode == USB_MODE_DEVICE) { /* resume parent HUB first */ usb_dev_resume_peer(udev->parent_hub); /* reduce chance of instant resume failure by waiting a little bit */ usb_pause_mtx(NULL, USB_MS_TO_TICKS(20)); /* resume current port (Valid in Host and Device Mode) */ err = usbd_req_clear_port_feature(udev->parent_hub, NULL, udev->port_no, UHF_PORT_SUSPEND); /* resume settle time */ usb_pause_mtx(NULL, USB_MS_TO_TICKS(usb_port_resume_delay)); } DPRINTF("Suspend was cancelled!\n"); return; } usbd_sr_lock(udev); /* notify all sub-devices about suspend */ err = usb_suspend_resume(udev, 1); usbd_sr_unlock(udev); if (udev->bus->methods->device_suspend != NULL) { usb_timeout_t temp; /* suspend device on the USB controller */ (udev->bus->methods->device_suspend) (udev); /* do DMA delay */ temp = usbd_get_dma_delay(udev); if (temp != 0) usb_pause_mtx(NULL, USB_MS_TO_TICKS(temp)); } if (usb_device_20_compatible(udev)) { /* suspend current port */ err = usbd_req_set_port_feature(udev->parent_hub, NULL, udev->port_no, UHF_PORT_SUSPEND); if (err) { DPRINTFN(0, "Suspending port failed\n"); return; } } else { /* suspend current port */ err = usbd_req_set_port_link_state(udev->parent_hub, NULL, udev->port_no, UPS_PORT_LS_U3); if (err) { DPRINTFN(0, "Suspending port failed\n"); return; } } udev = udev->parent_hub; goto repeat; } /*------------------------------------------------------------------------* * usbd_set_power_mode * * This function will set the power mode, see USB_POWER_MODE_XXX for a * USB device. *------------------------------------------------------------------------*/ void usbd_set_power_mode(struct usb_device *udev, uint8_t power_mode) { /* filter input argument */ if ((power_mode != USB_POWER_MODE_ON) && (power_mode != USB_POWER_MODE_OFF)) power_mode = USB_POWER_MODE_SAVE; power_mode = usbd_filter_power_mode(udev, power_mode); udev->power_mode = power_mode; /* update copy of power mode */ #if USB_HAVE_POWERD usb_bus_power_update(udev->bus); #else usb_needs_explore(udev->bus, 0 /* no probe */ ); #endif } /*------------------------------------------------------------------------* * usbd_filter_power_mode * * This function filters the power mode based on hardware requirements. *------------------------------------------------------------------------*/ uint8_t usbd_filter_power_mode(struct usb_device *udev, uint8_t power_mode) { const struct usb_bus_methods *mtod; int8_t temp; mtod = udev->bus->methods; temp = -1; if (mtod->get_power_mode != NULL) (mtod->get_power_mode) (udev, &temp); /* check if we should not filter */ if (temp < 0) return (power_mode); /* use fixed power mode given by hardware driver */ return (temp); } /*------------------------------------------------------------------------* * usbd_start_re_enumerate * * This function starts re-enumeration of the given USB device. This * function does not need to be called BUS-locked. This function does * not wait until the re-enumeration is completed. 
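* The caller merely flags the request; the bus explore thread
* notices "re_enumerate_wait" and carries out the actual
* re-enumeration later.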
*------------------------------------------------------------------------*/ void usbd_start_re_enumerate(struct usb_device *udev) { if (udev->re_enumerate_wait == USB_RE_ENUM_DONE) { udev->re_enumerate_wait = USB_RE_ENUM_START; usb_needs_explore(udev->bus, 0); } } /*------------------------------------------------------------------------* * usbd_start_set_config * * This function starts setting a USB configuration. This function * does not need to be called BUS-locked. This function does not wait * until the USB configuration change is completed. *------------------------------------------------------------------------*/ usb_error_t usbd_start_set_config(struct usb_device *udev, uint8_t index) { if (udev->re_enumerate_wait == USB_RE_ENUM_DONE) { if (udev->curr_config_index == index) { /* no change needed */ return (0); } udev->next_config_index = index; udev->re_enumerate_wait = USB_RE_ENUM_SET_CONFIG; usb_needs_explore(udev->bus, 0); return (0); } else if (udev->re_enumerate_wait == USB_RE_ENUM_SET_CONFIG) { if (udev->next_config_index == index) { /* no change needed */ return (0); } } return (USB_ERR_PENDING_REQUESTS); } diff --git a/sys/dev/usb/usb_hub_acpi.c b/sys/dev/usb/usb_hub_acpi.c index 2de28f592ca1..bd7271a2f2c6 100644 --- a/sys/dev/usb/usb_hub_acpi.c +++ b/sys/dev/usb/usb_hub_acpi.c @@ -1,602 +1,599 @@ /* $FreeBSD$ */ /*- * SPDX-License-Identifier: BSD-2-Clause-NetBSD * * Copyright (c) 1998 The NetBSD Foundation, Inc. All rights reserved. * Copyright (c) 1998 Lennart Augustsson. All rights reserved. * Copyright (c) 2008-2010 Hans Petter Selasky. All rights reserved. * Copyright (c) 2019 Takanori Watanabe. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
*/ /* * USB spec: http://www.usb.org/developers/docs/usbspec.zip */ #ifdef USB_GLOBAL_INCLUDE_FILE #include USB_GLOBAL_INCLUDE_FILE #else #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define USB_DEBUG_VAR uhub_debug #include #include #include #include #include #include #include #include #include #include #include #include #endif /* USB_GLOBAL_INCLUDE_FILE */ #include #include #include #include #include #define ACPI_PLD_SIZE 20 struct acpi_uhub_port { ACPI_HANDLE handle; #define ACPI_UPC_CONNECTABLE 0x80000000 #define ACPI_UPC_PORTTYPE(x) ((x)&0xff) uint32_t upc; uint8_t pld[ACPI_PLD_SIZE]; }; struct acpi_uhub_softc { struct uhub_softc usc; uint8_t nports; ACPI_HANDLE ah; struct acpi_uhub_port *port; }; static UINT32 acpi_uhub_find_rh_cb(ACPI_HANDLE ah, UINT32 nl, void *ctx, void **status) { ACPI_DEVICE_INFO *devinfo; UINT32 ret; *status = NULL; devinfo = NULL; ret = AcpiGetObjectInfo(ah, &devinfo); if (ACPI_SUCCESS(ret)) { if ((devinfo->Valid & ACPI_VALID_ADR) && (devinfo->Address == 0)) { ret = AE_CTRL_TERMINATE; *status = ah; } AcpiOsFree(devinfo); } return (ret); } static const char * acpi_uhub_upc_type(uint8_t type) { const char *typelist[] = {"TypeA", "MiniAB", "Express", "USB3-A", "USB3-B", "USB-MicroB", "USB3-MicroAB", "USB3-PowerB", "TypeC-USB2", "TypeC-Switch", "TypeC-nonSwitch"}; const int last = sizeof(typelist) / sizeof(typelist[0]); if (type == 0xff) { return "Proprietary"; } return (type < last) ? typelist[type] : "Unknown"; } static int acpi_uhub_parse_upc(device_t dev, unsigned int p, ACPI_HANDLE ah, struct sysctl_oid_list *poid) { ACPI_BUFFER buf; struct acpi_uhub_softc *sc = device_get_softc(dev); struct acpi_uhub_port *port = &sc->port[p - 1]; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; if (AcpiEvaluateObject(ah, "_UPC", NULL, &buf) == AE_OK) { ACPI_OBJECT *obj = buf.Pointer; UINT64 porttypenum, conn; uint8_t *connectable; acpi_PkgInt(obj, 0, &conn); acpi_PkgInt(obj, 1, &porttypenum); connectable = conn ? "" : "non"; port->upc = porttypenum; port->upc |= (conn) ? (ACPI_UPC_CONNECTABLE) : 0; if (usb_debug) device_printf(dev, "Port %u %sconnectable %s\n", p, connectable, acpi_uhub_upc_type(porttypenum)); SYSCTL_ADD_U32( device_get_sysctl_ctx(dev), poid, OID_AUTO, "upc", CTLFLAG_RD | CTLFLAG_MPSAFE, SYSCTL_NULL_U32_PTR, port->upc, "UPC value. 
MSB is visible flag"); } AcpiOsFree(buf.Pointer); return (0); } static int acpi_uhub_port_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_uhub_port *port = oidp->oid_arg1; struct sbuf sb; int error; sbuf_new_for_sysctl(&sb, NULL, 256, req); sbuf_printf(&sb, "Handle %s\n", acpi_name(port->handle)); if (port->upc == 0xffffffff) { sbuf_printf(&sb, "\tNo information\n"); goto end; } sbuf_printf(&sb, "\t"); if (port->upc & ACPI_UPC_CONNECTABLE) { sbuf_printf(&sb, "Connectable "); } sbuf_printf(&sb, "%s port\n", acpi_uhub_upc_type(port->upc & 0xff)); if ((port->pld[0] & 0x80) == 0) { sbuf_printf(&sb, "\tColor:#%02x%02x%02x\n", port->pld[1], port->pld[2], port->pld[3]); } sbuf_printf(&sb, "\tWidth %d mm Height %d mm\n", port->pld[4] | (port->pld[5] << 8), port->pld[6] | (port->pld[7] << 8)); if (port->pld[8] & 1) { sbuf_printf(&sb, "\tVisible\n"); } if (port->pld[8] & 2) { sbuf_printf(&sb, "\tDock\n"); } if (port->pld[8] & 4) { sbuf_printf(&sb, "\tLid\n"); } { int panelpos = (port->pld[8] >> 3) & 7; const char *panposstr[] = {"Top", "Bottom", "Left", "Right", "Front", "Back", "Unknown", "Invalid"}; const char *shapestr[] = { "Round", "Oval", "Square", "VRect", "HRect", "VTrape", "HTrape", "Unknown", "Chamferd", "Rsvd", "Rsvd", "Rsvd", "Rsvd", "Rsvd", "Rsvd", "Rsvd", "Rsvd"}; sbuf_printf(&sb, "\tPanelPosition: %s\n", panposstr[panelpos]); if (panelpos < 6) { const char *posstr[] = {"Upper", "Center", "Lower", "Invalid"}; sbuf_printf(&sb, "\tVertPosition: %s\n", posstr[(port->pld[8] >> 6) & 3]); sbuf_printf(&sb, "\tHorizPosition: %s\n", posstr[(port->pld[9]) & 3]); } sbuf_printf(&sb, "\tShape: %s\n", shapestr[(port->pld[9] >> 2) & 0xf]); sbuf_printf(&sb, "\tGroup Orientation %s\n", ((port->pld[9] >> 6) & 1) ? "Vertical" : "Horizontal"); sbuf_printf(&sb, "\tGroupToken %x\n", ((port->pld[9] >> 7) | (port->pld[10] << 1)) & 0xff); sbuf_printf(&sb, "\tGroupPosition %x\n", ((port->pld[10] >> 7) | (port->pld[11] << 1)) & 0xff); sbuf_printf(&sb, "\t%s %s %s\n", (port->pld[11] & 0x80) ? "Bay" : "", (port->pld[12] & 1) ? "Eject" : "", (port->pld[12] & 2) ? "OSPM" : "" ); } if ((port->pld[0] & 0x7f) >= 2) { sbuf_printf(&sb, "\tVOFF%d mm HOFF %dmm", port->pld[16] | (port->pld[17] << 8), port->pld[18] | (port->pld[19] << 8)); } end: error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } static int acpi_uhub_parse_pld(device_t dev, unsigned int p, ACPI_HANDLE ah, struct sysctl_oid_list *tree) { ACPI_BUFFER buf; struct acpi_uhub_softc *sc = device_get_softc(dev); struct acpi_uhub_port *port = &sc->port[p - 1]; buf.Pointer = NULL; buf.Length = ACPI_ALLOCATE_BUFFER; if (AcpiEvaluateObject(ah, "_PLD", NULL, &buf) == AE_OK) { ACPI_OBJECT *obj; unsigned char *resbuf; int len; obj = buf.Pointer; if (obj->Type == ACPI_TYPE_PACKAGE && obj->Package.Elements[0].Type == ACPI_TYPE_BUFFER) { ACPI_OBJECT *obj1; obj1 = &obj->Package.Elements[0]; len = obj1->Buffer.Length; resbuf = obj1->Buffer.Pointer; } else if (obj->Type == ACPI_TYPE_BUFFER) { len = obj->Buffer.Length; resbuf = obj->Buffer.Pointer; } else { goto skip; } len = (len < ACPI_PLD_SIZE) ? 
len : ACPI_PLD_SIZE; memcpy(port->pld, resbuf, len); SYSCTL_ADD_OPAQUE( device_get_sysctl_ctx(dev), tree, OID_AUTO, "pldraw", CTLFLAG_RD | CTLFLAG_MPSAFE, port->pld, len, "A", "Raw PLD value"); if (usb_debug) { device_printf(dev, "Revision:%d\n", resbuf[0] & 0x7f); if ((resbuf[0] & 0x80) == 0) { device_printf(dev, "Color:#%02x%02x%02x\n", resbuf[1], resbuf[2], resbuf[3]); } device_printf(dev, "Width %d mm Height %d mm\n", resbuf[4] | (resbuf[5] << 8), resbuf[6] | (resbuf[7] << 8)); if (resbuf[8] & 1) { device_printf(dev, "Visible\n"); } if (resbuf[8] & 2) { device_printf(dev, "Dock\n"); } if (resbuf[8] & 4) { device_printf(dev, "Lid\n"); } device_printf(dev, "PanelPosition: %d\n", (resbuf[8] >> 3) & 7); device_printf(dev, "VertPosition: %d\n", (resbuf[8] >> 6) & 3); device_printf(dev, "HorizPosition: %d\n", (resbuf[9]) & 3); device_printf(dev, "Shape: %d\n", (resbuf[9] >> 2) & 0xf); device_printf(dev, "80: %02x, %02x, %02x\n", resbuf[9], resbuf[10], resbuf[11]); device_printf(dev, "96: %02x, %02x, %02x, %02x\n", resbuf[12], resbuf[13], resbuf[14], resbuf[15]); if ((resbuf[0] & 0x7f) >= 2) { device_printf(dev, "VOFF%d mm HOFF %dmm", resbuf[16] | (resbuf[17] << 8), resbuf[18] | (resbuf[19] << 8)); } } skip: AcpiOsFree(buf.Pointer); } return (0); } static ACPI_STATUS acpi_uhub_find_rh(device_t dev, ACPI_HANDLE *ah) { device_t grand; ACPI_HANDLE gah; *ah = NULL; grand = device_get_parent(device_get_parent(dev)); if ((gah = acpi_get_handle(grand)) == NULL) return (AE_ERROR); return (AcpiWalkNamespace(ACPI_TYPE_DEVICE, gah, 1, acpi_uhub_find_rh_cb, NULL, dev, ah)); } static ACPI_STATUS acpi_usb_hub_port_probe_cb(ACPI_HANDLE ah, UINT32 lv, void *ctx, void **rv) { ACPI_DEVICE_INFO *devinfo; device_t dev = ctx; struct acpi_uhub_softc *sc = device_get_softc(dev); UINT32 ret; ret = AcpiGetObjectInfo(ah, &devinfo); if (ACPI_SUCCESS(ret)) { if ((devinfo->Valid & ACPI_VALID_ADR) && (devinfo->Address > 0) && (devinfo->Address <= (uint64_t)sc->nports)) { char buf[] = "portXXX"; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *oid; struct sysctl_oid_list *tree; snprintf(buf, sizeof(buf), "port%ju", (uintmax_t)devinfo->Address); oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "port nodes"); tree = SYSCTL_CHILDREN(oid); sc->port[devinfo->Address - 1].handle = ah; sc->port[devinfo->Address - 1].upc = 0xffffffff; acpi_uhub_parse_upc(dev, devinfo->Address, ah, tree); acpi_uhub_parse_pld(dev, devinfo->Address, ah, tree); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), tree, OID_AUTO, "info", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, &sc->port[devinfo->Address - 1], 0, acpi_uhub_port_sysctl, "A", "Port information"); } AcpiOsFree(devinfo); } return (AE_OK); } static ACPI_STATUS acpi_usb_hub_port_probe(device_t dev, ACPI_HANDLE ah) { return (AcpiWalkNamespace(ACPI_TYPE_DEVICE, ah, 1, acpi_usb_hub_port_probe_cb, NULL, dev, NULL)); } static int acpi_uhub_root_probe(device_t dev) { ACPI_STATUS status; ACPI_HANDLE ah; if (acpi_disabled("usb")) return (ENXIO); status = acpi_uhub_find_rh(dev, &ah); if (ACPI_SUCCESS(status) && ah != NULL && uhub_probe(dev) <= 0) { /* success prior than non-ACPI USB HUB */ return (BUS_PROBE_DEFAULT + 1); } return (ENXIO); } static int acpi_uhub_probe(device_t dev) { ACPI_HANDLE ah; if (acpi_disabled("usb")) return (ENXIO); ah = acpi_get_handle(dev); if (ah == NULL) return (ENXIO); if (uhub_probe(dev) <= 0) { /* success prior than non-ACPI USB HUB */ return (BUS_PROBE_DEFAULT + 1); } 
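/*
 * No ACPI handle or generic probe failure: decline, so that the
 * plain uhub(4) driver may attach instead. Note that the probe
 * return value closest to zero wins, which is why the success
 * paths above return BUS_PROBE_DEFAULT + 1 to outrank the
 * generic, non-ACPI hub driver.
 */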
return (ENXIO); } static int acpi_uhub_attach_common(device_t dev) { struct usb_hub *uh; struct acpi_uhub_softc *sc = device_get_softc(dev); ACPI_STATUS status; int ret = ENXIO; uh = sc->usc.sc_udev->hub; sc->nports = uh->nports; sc->port = malloc(sizeof(struct acpi_uhub_port) * uh->nports, M_USBDEV, M_WAITOK | M_ZERO); status = acpi_usb_hub_port_probe(dev, sc->ah); if (ACPI_SUCCESS(status)) { ret = 0; } return (ret); } static int acpi_uhub_detach(device_t dev) { struct acpi_uhub_softc *sc = device_get_softc(dev); free(sc->port, M_USBDEV); return (uhub_detach(dev)); } static int acpi_uhub_root_attach(device_t dev) { int ret; struct acpi_uhub_softc *sc = device_get_softc(dev); if (ACPI_FAILURE(acpi_uhub_find_rh(dev, &sc->ah)) || (sc->ah == NULL)) { return (ENXIO); } if ((ret = uhub_attach(dev)) != 0) { return (ret); } if ((ret = acpi_uhub_attach_common(dev)) != 0) { acpi_uhub_detach(dev); } return (ret); } static int acpi_uhub_attach(device_t dev) { int ret; struct acpi_uhub_softc *sc = device_get_softc(dev); sc->ah = acpi_get_handle(dev); if (sc->ah == NULL) { return (ENXIO); } if ((ret = uhub_attach(dev)) != 0) { return (ret); } if ((ret = acpi_uhub_attach_common(dev)) != 0) { acpi_uhub_detach(dev); } return (ret); } static int acpi_uhub_read_ivar(device_t dev, device_t child, int idx, uintptr_t *res) { struct hub_result hres; struct acpi_uhub_softc *sc = device_get_softc(dev); ACPI_HANDLE ah; mtx_lock(&Giant); uhub_find_iface_index(sc->usc.sc_udev->hub, child, &hres); mtx_unlock(&Giant); if ((idx == ACPI_IVAR_HANDLE) && (hres.portno > 0) && (hres.portno <= sc->nports) && (ah = sc->port[hres.portno - 1].handle)) { *res = (uintptr_t)ah; return (0); } return (ENXIO); } static int -acpi_uhub_child_location_string(device_t parent, device_t child, - char *buf, size_t buflen) +acpi_uhub_child_location(device_t parent, device_t child, struct sbuf *sb) { ACPI_HANDLE ah; - uhub_child_location_string(parent, child, buf, buflen); + uhub_child_location(parent, child, sb); ah = acpi_get_handle(child); - if (ah != NULL) { - strlcat(buf, " handle=", buflen); - strlcat(buf, acpi_name(ah), buflen); - } + if (ah != NULL) + sbuf_printf(sb, " handle=%s", acpi_name(ah)); return (0); } static device_method_t acpi_uhub_methods[] = { DEVMETHOD(device_probe, acpi_uhub_probe), DEVMETHOD(device_attach, acpi_uhub_attach), DEVMETHOD(device_detach, acpi_uhub_detach), - DEVMETHOD(bus_child_location_str, acpi_uhub_child_location_string), + DEVMETHOD(bus_child_location, acpi_uhub_child_location), DEVMETHOD(bus_read_ivar, acpi_uhub_read_ivar), DEVMETHOD_END }; static device_method_t acpi_uhub_root_methods[] = { DEVMETHOD(device_probe, acpi_uhub_root_probe), DEVMETHOD(device_attach, acpi_uhub_root_attach), DEVMETHOD(device_detach, acpi_uhub_detach), DEVMETHOD(bus_read_ivar, acpi_uhub_read_ivar), - DEVMETHOD(bus_child_location_str, acpi_uhub_child_location_string), + DEVMETHOD(bus_child_location, acpi_uhub_child_location), DEVMETHOD_END }; static devclass_t uhub_devclass; extern driver_t uhub_driver; static kobj_class_t uhub_baseclasses[] = {&uhub_driver, NULL}; static driver_t acpi_uhub_driver = { .name = "uhub", .methods = acpi_uhub_methods, .size = sizeof(struct acpi_uhub_softc), .baseclasses = uhub_baseclasses, }; static driver_t acpi_uhub_root_driver = { .name = "uhub", .methods = acpi_uhub_root_methods, .size = sizeof(struct acpi_uhub_softc), .baseclasses = uhub_baseclasses, }; DRIVER_MODULE(uacpi, uhub, acpi_uhub_driver, uhub_devclass, 0, 0); DRIVER_MODULE(uacpi, usbus, acpi_uhub_root_driver, uhub_devclass, 0, 0);
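/*
 * The acpi_uhub hunk above shows the pattern this change applies across
 * the tree: bus_child_location_str() filled a caller-provided fixed-size
 * buffer via strlcat(), while bus_child_location() appends to a
 * caller-supplied sbuf, so a bus driver can emit key=value pairs without
 * threading a buffer length through and risking silent truncation.
 * A minimal sketch for a hypothetical foo(4) bus follows; the foo_*
 * names and ivar fields are illustrative only, not part of this change.
 */
static int
foo_child_location(device_t bus, device_t child, struct sbuf *sb)
{
	struct foo_ivars *ivars = device_get_ivars(child);

	/* Space-separated key=value pairs, per BUS_CHILD_LOCATION(9). */
	sbuf_printf(sb, "slot=%d function=%d", ivars->fi_slot, ivars->fi_func);
	return (0);
}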
MODULE_DEPEND(uacpi, acpi, 1, 1, 1); MODULE_DEPEND(uacpi, usb, 1, 1, 1); MODULE_VERSION(uacpi, 1); diff --git a/sys/dev/usb/usb_hub_private.h b/sys/dev/usb/usb_hub_private.h index ddb401082061..0422eda2bfa9 100644 --- a/sys/dev/usb/usb_hub_private.h +++ b/sys/dev/usb/usb_hub_private.h @@ -1,85 +1,85 @@ /* $FreeBSD$ */ /*- * SPDX-License-Identifier: BSD-2-Clause-NetBSD * * Copyright (c) 1998 The NetBSD Foundation, Inc. All rights reserved. * Copyright (c) 1998 Lennart Augustsson. All rights reserved. * Copyright (c) 2008-2010 Hans Petter Selasky. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * USB spec: http://www.usb.org/developers/docs/usbspec.zip */ #ifndef USB_HUB_PRIVATE_H_ #define USB_HUB_PRIVATE_H_ #define UHUB_INTR_INTERVAL 250 /* ms */ enum { UHUB_INTR_TRANSFER, #if USB_HAVE_TT_SUPPORT UHUB_RESET_TT_TRANSFER, #endif UHUB_N_TRANSFER, }; struct uhub_current_state { uint16_t port_change; uint16_t port_status; }; struct uhub_softc { struct uhub_current_state sc_st; /* current state */ #if (USB_HAVE_FIXED_PORT != 0) struct usb_hub sc_hub; #endif device_t sc_dev; /* base device */ struct mtx sc_mtx; /* our mutex */ struct usb_device *sc_udev; /* USB device */ struct usb_xfer *sc_xfer[UHUB_N_TRANSFER]; /* interrupt xfer */ #if USB_HAVE_DISABLE_ENUM int sc_disable_enumeration; int sc_disable_port_power; #endif uint8_t sc_usb_port_errors; /* error counter */ #define UHUB_USB_PORT_ERRORS_MAX 4 uint8_t sc_flags; #define UHUB_FLAG_DID_EXPLORE 0x01 }; struct hub_result { struct usb_device *udev; uint8_t portno; uint8_t iface_index; }; void uhub_find_iface_index(struct usb_hub *hub, device_t child, struct hub_result *res); device_probe_t uhub_probe; device_attach_t uhub_attach; device_detach_t uhub_detach; -bus_child_location_str_t uhub_child_location_string; +bus_child_location_t uhub_child_location; #endif diff --git a/sys/dev/virtio/mmio/virtio_mmio.c b/sys/dev/virtio/mmio/virtio_mmio.c index 5672a6f0f69c..ca89bac5ff7c 100644 --- a/sys/dev/virtio/mmio/virtio_mmio.c +++ b/sys/dev/virtio/mmio/virtio_mmio.c @@ -1,960 +1,960 @@ /*- * Copyright (c) 2014 Ruslan Bukin * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. 
* * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by Andrew Turner * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * VirtIO MMIO interface. * This driver is heavily based on VirtIO PCI interface driver. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_mmio_if.h" #include "virtio_bus_if.h" #include "virtio_if.h" struct vtmmio_virtqueue { struct virtqueue *vtv_vq; int vtv_no_intr; }; static int vtmmio_detach(device_t); static int vtmmio_suspend(device_t); static int vtmmio_resume(device_t); static int vtmmio_shutdown(device_t); static void vtmmio_driver_added(device_t, driver_t *); static void vtmmio_child_detached(device_t, device_t); static int vtmmio_read_ivar(device_t, device_t, int, uintptr_t *); static int vtmmio_write_ivar(device_t, device_t, int, uintptr_t); static uint64_t vtmmio_negotiate_features(device_t, uint64_t); static int vtmmio_with_feature(device_t, uint64_t); static void vtmmio_set_virtqueue(struct vtmmio_softc *sc, struct virtqueue *vq, uint32_t size); static int vtmmio_alloc_virtqueues(device_t, int, int, struct vq_alloc_info *); static int vtmmio_setup_intr(device_t, enum intr_type); static void vtmmio_stop(device_t); static void vtmmio_poll(device_t); static int vtmmio_reinit(device_t, uint64_t); static void vtmmio_reinit_complete(device_t); static void vtmmio_notify_virtqueue(device_t, uint16_t, bus_size_t); static uint8_t vtmmio_get_status(device_t); static void vtmmio_set_status(device_t, uint8_t); static void vtmmio_read_dev_config(device_t, bus_size_t, void *, int); static void vtmmio_write_dev_config(device_t, bus_size_t, void *, int); static void vtmmio_describe_features(struct vtmmio_softc *, const char *, uint64_t); static void vtmmio_probe_and_attach_child(struct vtmmio_softc *); static int vtmmio_reinit_virtqueue(struct vtmmio_softc *, int); static void vtmmio_free_interrupts(struct vtmmio_softc *); static void vtmmio_free_virtqueues(struct vtmmio_softc *); static void 
vtmmio_release_child_resources(struct vtmmio_softc *); static void vtmmio_reset(struct vtmmio_softc *); static void vtmmio_select_virtqueue(struct vtmmio_softc *, int); static void vtmmio_vq_intr(void *); /* * I/O port read/write wrappers. */ #define vtmmio_write_config_1(sc, o, v) \ do { \ if (sc->platform != NULL) \ VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \ bus_write_1((sc)->res[0], (o), (v)); \ if (sc->platform != NULL) \ VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \ } while (0) #define vtmmio_write_config_2(sc, o, v) \ do { \ if (sc->platform != NULL) \ VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \ bus_write_2((sc)->res[0], (o), (v)); \ if (sc->platform != NULL) \ VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \ } while (0) #define vtmmio_write_config_4(sc, o, v) \ do { \ if (sc->platform != NULL) \ VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \ bus_write_4((sc)->res[0], (o), (v)); \ if (sc->platform != NULL) \ VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \ } while (0) #define vtmmio_read_config_1(sc, o) \ bus_read_1((sc)->res[0], (o)) #define vtmmio_read_config_2(sc, o) \ bus_read_2((sc)->res[0], (o)) #define vtmmio_read_config_4(sc, o) \ bus_read_4((sc)->res[0], (o)) static device_method_t vtmmio_methods[] = { /* Device interface. */ DEVMETHOD(device_attach, vtmmio_attach), DEVMETHOD(device_detach, vtmmio_detach), DEVMETHOD(device_suspend, vtmmio_suspend), DEVMETHOD(device_resume, vtmmio_resume), DEVMETHOD(device_shutdown, vtmmio_shutdown), /* Bus interface. */ DEVMETHOD(bus_driver_added, vtmmio_driver_added), DEVMETHOD(bus_child_detached, vtmmio_child_detached), - DEVMETHOD(bus_child_pnpinfo_str, virtio_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, virtio_child_pnpinfo), DEVMETHOD(bus_read_ivar, vtmmio_read_ivar), DEVMETHOD(bus_write_ivar, vtmmio_write_ivar), /* VirtIO bus interface. 
*/ DEVMETHOD(virtio_bus_negotiate_features, vtmmio_negotiate_features), DEVMETHOD(virtio_bus_with_feature, vtmmio_with_feature), DEVMETHOD(virtio_bus_alloc_virtqueues, vtmmio_alloc_virtqueues), DEVMETHOD(virtio_bus_setup_intr, vtmmio_setup_intr), DEVMETHOD(virtio_bus_stop, vtmmio_stop), DEVMETHOD(virtio_bus_poll, vtmmio_poll), DEVMETHOD(virtio_bus_reinit, vtmmio_reinit), DEVMETHOD(virtio_bus_reinit_complete, vtmmio_reinit_complete), DEVMETHOD(virtio_bus_notify_vq, vtmmio_notify_virtqueue), DEVMETHOD(virtio_bus_read_device_config, vtmmio_read_dev_config), DEVMETHOD(virtio_bus_write_device_config, vtmmio_write_dev_config), DEVMETHOD_END }; DEFINE_CLASS_0(virtio_mmio, vtmmio_driver, vtmmio_methods, sizeof(struct vtmmio_softc)); MODULE_VERSION(virtio_mmio, 1); int vtmmio_probe(device_t dev) { struct vtmmio_softc *sc; int rid; uint32_t magic, version; sc = device_get_softc(dev); rid = 0; sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->res[0] == NULL) { device_printf(dev, "Cannot allocate memory window.\n"); return (ENXIO); } magic = vtmmio_read_config_4(sc, VIRTIO_MMIO_MAGIC_VALUE); if (magic != VIRTIO_MMIO_MAGIC_VIRT) { device_printf(dev, "Bad magic value %#x\n", magic); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]); return (ENXIO); } version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION); if (version < 1 || version > 2) { device_printf(dev, "Unsupported version: %#x\n", version); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]); return (ENXIO); } if (vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID) == 0) { bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]); return (ENXIO); } bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]); device_set_desc(dev, "VirtIO MMIO adapter"); return (BUS_PROBE_DEFAULT); } static int vtmmio_setup_intr(device_t dev, enum intr_type type) { struct vtmmio_softc *sc; int rid; int err; sc = device_get_softc(dev); if (sc->platform != NULL) { err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev, vtmmio_vq_intr, sc); if (err == 0) { /* Okay we have backend-specific interrupts */ return (0); } } rid = 0; sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->res[1]) { device_printf(dev, "Can't allocate interrupt\n"); return (ENXIO); } if (bus_setup_intr(dev, sc->res[1], type | INTR_MPSAFE, NULL, vtmmio_vq_intr, sc, &sc->ih)) { device_printf(dev, "Can't setup the interrupt\n"); return (ENXIO); } return (0); } int vtmmio_attach(device_t dev) { struct vtmmio_softc *sc; device_t child; int rid; sc = device_get_softc(dev); sc->dev = dev; rid = 0; sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->res[0] == NULL) { device_printf(dev, "Cannot allocate memory window.\n"); return (ENXIO); } sc->vtmmio_version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION); vtmmio_reset(sc); /* Tell the host we've noticed this device. 
*/ vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); if ((child = device_add_child(dev, NULL, -1)) == NULL) { device_printf(dev, "Cannot create child device.\n"); vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); vtmmio_detach(dev); return (ENOMEM); } sc->vtmmio_child_dev = child; vtmmio_probe_and_attach_child(sc); return (0); } static int vtmmio_detach(device_t dev) { struct vtmmio_softc *sc; device_t child; int error; sc = device_get_softc(dev); if ((child = sc->vtmmio_child_dev) != NULL) { error = device_delete_child(dev, child); if (error) return (error); sc->vtmmio_child_dev = NULL; } vtmmio_reset(sc); if (sc->res[0] != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res[0]); sc->res[0] = NULL; } return (0); } static int vtmmio_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int vtmmio_resume(device_t dev) { return (bus_generic_resume(dev)); } static int vtmmio_shutdown(device_t dev) { (void) bus_generic_shutdown(dev); /* Forcibly stop the host device. */ vtmmio_stop(dev); return (0); } static void vtmmio_driver_added(device_t dev, driver_t *driver) { struct vtmmio_softc *sc; sc = device_get_softc(dev); vtmmio_probe_and_attach_child(sc); } static void vtmmio_child_detached(device_t dev, device_t child) { struct vtmmio_softc *sc; sc = device_get_softc(dev); vtmmio_reset(sc); vtmmio_release_child_resources(sc); } static int vtmmio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct vtmmio_softc *sc; sc = device_get_softc(dev); if (sc->vtmmio_child_dev != child) return (ENOENT); switch (index) { case VIRTIO_IVAR_DEVTYPE: case VIRTIO_IVAR_SUBDEVICE: *result = vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID); break; case VIRTIO_IVAR_VENDOR: *result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID); break; case VIRTIO_IVAR_SUBVENDOR: case VIRTIO_IVAR_DEVICE: /* * Dummy value for fields not present in this bus. Used by - * bus-agnostic virtio_child_pnpinfo_str. + * bus-agnostic virtio_child_pnpinfo. */ *result = 0; break; case VIRTIO_IVAR_MODERN: /* * There are several modern (aka MMIO v2) spec compliance * issues with this driver, but keep the status quo. */ *result = sc->vtmmio_version > 1; break; default: return (ENOENT); } return (0); } static int vtmmio_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct vtmmio_softc *sc; sc = device_get_softc(dev); if (sc->vtmmio_child_dev != child) return (ENOENT); switch (index) { case VIRTIO_IVAR_FEATURE_DESC: sc->vtmmio_child_feat_desc = (void *) value; break; default: return (ENOENT); } return (0); } static uint64_t vtmmio_negotiate_features(device_t dev, uint64_t child_features) { struct vtmmio_softc *sc; uint64_t host_features, features; sc = device_get_softc(dev); if (sc->vtmmio_version > 1) { child_features |= VIRTIO_F_VERSION_1; } vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 1); host_features = vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES); host_features <<= 32; vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 0); host_features |= vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES); vtmmio_describe_features(sc, "host", host_features); /* * Limit negotiated features to what the driver, virtqueue, and * host all support. 
*/ features = host_features & child_features; features = virtio_filter_transport_features(features); sc->vtmmio_features = features; vtmmio_describe_features(sc, "negotiated", features); vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 1); vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features >> 32); vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 0); vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features); return (features); } static int vtmmio_with_feature(device_t dev, uint64_t feature) { struct vtmmio_softc *sc; sc = device_get_softc(dev); return ((sc->vtmmio_features & feature) != 0); } static void vtmmio_set_virtqueue(struct vtmmio_softc *sc, struct virtqueue *vq, uint32_t size) { vm_paddr_t paddr; vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size); if (sc->vtmmio_version == 1) { vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN, VIRTIO_MMIO_VRING_ALIGN); paddr = virtqueue_paddr(vq); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, paddr >> PAGE_SHIFT); } else { paddr = virtqueue_desc_paddr(vq); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_LOW, paddr); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_HIGH, ((uint64_t)paddr) >> 32); paddr = virtqueue_avail_paddr(vq); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_LOW, paddr); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_HIGH, ((uint64_t)paddr) >> 32); paddr = virtqueue_used_paddr(vq); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_LOW, paddr); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_HIGH, ((uint64_t)paddr) >> 32); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 1); } } static int vtmmio_alloc_virtqueues(device_t dev, int flags, int nvqs, struct vq_alloc_info *vq_info) { struct vtmmio_virtqueue *vqx; struct vq_alloc_info *info; struct vtmmio_softc *sc; struct virtqueue *vq; uint32_t size; int idx, error; sc = device_get_softc(dev); if (sc->vtmmio_nvqs != 0) return (EALREADY); if (nvqs <= 0) return (EINVAL); sc->vtmmio_vqs = malloc(nvqs * sizeof(struct vtmmio_virtqueue), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtmmio_vqs == NULL) return (ENOMEM); if (sc->vtmmio_version == 1) { vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE, (1 << PAGE_SHIFT)); } for (idx = 0; idx < nvqs; idx++) { vqx = &sc->vtmmio_vqs[idx]; info = &vq_info[idx]; vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx); vtmmio_select_virtqueue(sc, idx); size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX); error = virtqueue_alloc(dev, idx, size, VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_VRING_ALIGN, ~(vm_paddr_t)0, info, &vq); if (error) { device_printf(dev, "cannot allocate virtqueue %d: %d\n", idx, error); break; } vtmmio_set_virtqueue(sc, vq, size); vqx->vtv_vq = *info->vqai_vq = vq; vqx->vtv_no_intr = info->vqai_intr == NULL; sc->vtmmio_nvqs++; } if (error) vtmmio_free_virtqueues(sc); return (error); } static void vtmmio_stop(device_t dev) { vtmmio_reset(device_get_softc(dev)); } static void vtmmio_poll(device_t dev) { struct vtmmio_softc *sc; sc = device_get_softc(dev); if (sc->platform != NULL) VIRTIO_MMIO_POLL(sc->platform); } static int vtmmio_reinit(device_t dev, uint64_t features) { struct vtmmio_softc *sc; int idx, error; sc = device_get_softc(dev); if (vtmmio_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET) vtmmio_stop(dev); /* * Quickly drive the status through ACK and DRIVER. The device * does not become usable again until vtmmio_reinit_complete(). 
*/ vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); vtmmio_negotiate_features(dev, features); if (sc->vtmmio_version == 1) { vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE, (1 << PAGE_SHIFT)); } for (idx = 0; idx < sc->vtmmio_nvqs; idx++) { error = vtmmio_reinit_virtqueue(sc, idx); if (error) return (error); } return (0); } static void vtmmio_reinit_complete(device_t dev) { vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); } static void vtmmio_notify_virtqueue(device_t dev, uint16_t queue, bus_size_t offset) { struct vtmmio_softc *sc; sc = device_get_softc(dev); MPASS(offset == VIRTIO_MMIO_QUEUE_NOTIFY); vtmmio_write_config_4(sc, offset, queue); } static uint8_t vtmmio_get_status(device_t dev) { struct vtmmio_softc *sc; sc = device_get_softc(dev); return (vtmmio_read_config_4(sc, VIRTIO_MMIO_STATUS)); } static void vtmmio_set_status(device_t dev, uint8_t status) { struct vtmmio_softc *sc; sc = device_get_softc(dev); if (status != VIRTIO_CONFIG_STATUS_RESET) status |= vtmmio_get_status(dev); vtmmio_write_config_4(sc, VIRTIO_MMIO_STATUS, status); } static void vtmmio_read_dev_config(device_t dev, bus_size_t offset, void *dst, int length) { struct vtmmio_softc *sc; bus_size_t off; uint8_t *d; int size; uint64_t low32, high32; sc = device_get_softc(dev); off = VIRTIO_MMIO_CONFIG + offset; /* * The non-legacy MMIO specification adds the following restriction: * * 4.2.2.2: For the device-specific configuration space, the driver * MUST use 8 bit wide accesses for 8 bit wide fields, 16 bit wide * and aligned accesses for 16 bit wide fields and 32 bit wide and * aligned accesses for 32 and 64 bit wide fields. * * The endianness also varies between non-legacy and legacy: * * 2.4: Note: The device configuration space uses the little-endian * format for multi-byte fields. * * 2.4.3: Note that for legacy interfaces, device configuration space * is generally the guest’s native endian, rather than PCI’s * little-endian. The correct endian-ness is documented for each * device. */ if (sc->vtmmio_version > 1) { switch (length) { case 1: *(uint8_t *)dst = vtmmio_read_config_1(sc, off); break; case 2: *(uint16_t *)dst = le16toh(vtmmio_read_config_2(sc, off)); break; case 4: *(uint32_t *)dst = le32toh(vtmmio_read_config_4(sc, off)); break; case 8: low32 = le32toh(vtmmio_read_config_4(sc, off)); high32 = le32toh(vtmmio_read_config_4(sc, off + 4)); *(uint64_t *)dst = (high32 << 32) | low32; break; default: panic("%s: invalid length %d\n", __func__, length); } return; } for (d = dst; length > 0; d += size, off += size, length -= size) { #ifdef ALLOW_WORD_ALIGNED_ACCESS if (length >= 4) { size = 4; *(uint32_t *)d = vtmmio_read_config_4(sc, off); } else if (length >= 2) { size = 2; *(uint16_t *)d = vtmmio_read_config_2(sc, off); } else #endif { size = 1; *d = vtmmio_read_config_1(sc, off); } } } static void vtmmio_write_dev_config(device_t dev, bus_size_t offset, void *src, int length) { struct vtmmio_softc *sc; bus_size_t off; uint8_t *s; int size; sc = device_get_softc(dev); off = VIRTIO_MMIO_CONFIG + offset; /* * The non-legacy MMIO specification adds size and alignment * restrictions. It also changes the endianness from native-endian to * little-endian. See vtmmio_read_dev_config. 
*/ if (sc->vtmmio_version > 1) { switch (length) { case 1: vtmmio_write_config_1(sc, off, *(uint8_t *)src); break; case 2: vtmmio_write_config_2(sc, off, htole16(*(uint16_t *)src)); break; case 4: vtmmio_write_config_4(sc, off, htole32(*(uint32_t *)src)); break; case 8: vtmmio_write_config_4(sc, off, htole32(*(uint64_t *)src)); vtmmio_write_config_4(sc, off + 4, htole32((*(uint64_t *)src) >> 32)); break; default: panic("%s: invalid length %d\n", __func__, length); } return; } for (s = src; length > 0; s += size, off += size, length -= size) { #ifdef ALLOW_WORD_ALIGNED_ACCESS if (length >= 4) { size = 4; vtmmio_write_config_4(sc, off, *(uint32_t *)s); } else if (length >= 2) { size = 2; vtmmio_write_config_2(sc, off, *(uint16_t *)s); } else #endif { size = 1; vtmmio_write_config_1(sc, off, *s); } } } static void vtmmio_describe_features(struct vtmmio_softc *sc, const char *msg, uint64_t features) { device_t dev, child; dev = sc->dev; child = sc->vtmmio_child_dev; if (device_is_attached(child) || bootverbose == 0) return; virtio_describe(dev, msg, features, sc->vtmmio_child_feat_desc); } static void vtmmio_probe_and_attach_child(struct vtmmio_softc *sc) { device_t dev, child; dev = sc->dev; child = sc->vtmmio_child_dev; if (child == NULL) return; if (device_get_state(child) != DS_NOTPRESENT) { return; } if (device_probe(child) != 0) { return; } vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); if (device_attach(child) != 0) { vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); vtmmio_reset(sc); vtmmio_release_child_resources(sc); /* Reset status for future attempt. */ vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); } else { vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); VIRTIO_ATTACH_COMPLETED(child); } } static int vtmmio_reinit_virtqueue(struct vtmmio_softc *sc, int idx) { struct vtmmio_virtqueue *vqx; struct virtqueue *vq; int error; uint16_t size; vqx = &sc->vtmmio_vqs[idx]; vq = vqx->vtv_vq; KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx)); vtmmio_select_virtqueue(sc, idx); size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX); error = virtqueue_reinit(vq, size); if (error) return (error); vtmmio_set_virtqueue(sc, vq, size); return (0); } static void vtmmio_free_interrupts(struct vtmmio_softc *sc) { if (sc->ih != NULL) bus_teardown_intr(sc->dev, sc->res[1], sc->ih); if (sc->res[1] != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res[1]); } static void vtmmio_free_virtqueues(struct vtmmio_softc *sc) { struct vtmmio_virtqueue *vqx; int idx; for (idx = 0; idx < sc->vtmmio_nvqs; idx++) { vqx = &sc->vtmmio_vqs[idx]; vtmmio_select_virtqueue(sc, idx); if (sc->vtmmio_version == 1) vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 0); else vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 0); virtqueue_free(vqx->vtv_vq); vqx->vtv_vq = NULL; } free(sc->vtmmio_vqs, M_DEVBUF); sc->vtmmio_vqs = NULL; sc->vtmmio_nvqs = 0; } static void vtmmio_release_child_resources(struct vtmmio_softc *sc) { vtmmio_free_interrupts(sc); vtmmio_free_virtqueues(sc); } static void vtmmio_reset(struct vtmmio_softc *sc) { /* * Setting the status to RESET sets the host device to * the original, uninitialized state. 
*/ vtmmio_set_status(sc->dev, VIRTIO_CONFIG_STATUS_RESET); } static void vtmmio_select_virtqueue(struct vtmmio_softc *sc, int idx) { vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx); } static void vtmmio_vq_intr(void *arg) { struct vtmmio_virtqueue *vqx; struct vtmmio_softc *sc; struct virtqueue *vq; uint32_t status; int idx; sc = arg; status = vtmmio_read_config_4(sc, VIRTIO_MMIO_INTERRUPT_STATUS); vtmmio_write_config_4(sc, VIRTIO_MMIO_INTERRUPT_ACK, status); /* The config changed */ if (status & VIRTIO_MMIO_INT_CONFIG) if (sc->vtmmio_child_dev != NULL) VIRTIO_CONFIG_CHANGE(sc->vtmmio_child_dev); /* Notify all virtqueues. */ if (status & VIRTIO_MMIO_INT_VRING) { for (idx = 0; idx < sc->vtmmio_nvqs; idx++) { vqx = &sc->vtmmio_vqs[idx]; if (vqx->vtv_no_intr == 0) { vq = vqx->vtv_vq; virtqueue_intr(vq); } } } } diff --git a/sys/dev/virtio/pci/virtio_pci_legacy.c b/sys/dev/virtio/pci/virtio_pci_legacy.c index a17dd22aa953..0524529d064b 100644 --- a/sys/dev/virtio/pci/virtio_pci_legacy.c +++ b/sys/dev/virtio/pci/virtio_pci_legacy.c @@ -1,791 +1,791 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for the legacy VirtIO PCI interface. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_bus_if.h" #include "virtio_pci_if.h" #include "virtio_if.h" struct vtpci_legacy_softc { device_t vtpci_dev; struct vtpci_common vtpci_common; int vtpci_res_type; struct resource *vtpci_res; struct resource *vtpci_msix_table_res; struct resource *vtpci_msix_pba_res; }; static int vtpci_legacy_probe(device_t); static int vtpci_legacy_attach(device_t); static int vtpci_legacy_detach(device_t); static int vtpci_legacy_suspend(device_t); static int vtpci_legacy_resume(device_t); static int vtpci_legacy_shutdown(device_t); static void vtpci_legacy_driver_added(device_t, driver_t *); static void vtpci_legacy_child_detached(device_t, device_t); static int vtpci_legacy_read_ivar(device_t, device_t, int, uintptr_t *); static int vtpci_legacy_write_ivar(device_t, device_t, int, uintptr_t); static uint8_t vtpci_legacy_read_isr(device_t); static uint16_t vtpci_legacy_get_vq_size(device_t, int); static bus_size_t vtpci_legacy_get_vq_notify_off(device_t, int); static void vtpci_legacy_set_vq(device_t, struct virtqueue *); static void vtpci_legacy_disable_vq(device_t, int); static int vtpci_legacy_register_cfg_msix(device_t, struct vtpci_interrupt *); static int vtpci_legacy_register_vq_msix(device_t, int idx, struct vtpci_interrupt *); static uint64_t vtpci_legacy_negotiate_features(device_t, uint64_t); static int vtpci_legacy_with_feature(device_t, uint64_t); static int vtpci_legacy_alloc_virtqueues(device_t, int, int, struct vq_alloc_info *); static int vtpci_legacy_setup_interrupts(device_t, enum intr_type); static void vtpci_legacy_stop(device_t); static int vtpci_legacy_reinit(device_t, uint64_t); static void vtpci_legacy_reinit_complete(device_t); static void vtpci_legacy_notify_vq(device_t, uint16_t, bus_size_t); static void vtpci_legacy_read_dev_config(device_t, bus_size_t, void *, int); static void vtpci_legacy_write_dev_config(device_t, bus_size_t, void *, int); static bool vtpci_legacy_setup_msix(struct vtpci_legacy_softc *sc); static void vtpci_legacy_teardown_msix(struct vtpci_legacy_softc *sc); static int vtpci_legacy_alloc_resources(struct vtpci_legacy_softc *); static void vtpci_legacy_free_resources(struct vtpci_legacy_softc *); static void vtpci_legacy_probe_and_attach_child(struct vtpci_legacy_softc *); static uint8_t vtpci_legacy_get_status(struct vtpci_legacy_softc *); static void vtpci_legacy_set_status(struct vtpci_legacy_softc *, uint8_t); static void vtpci_legacy_select_virtqueue(struct vtpci_legacy_softc *, int); static void vtpci_legacy_reset(struct vtpci_legacy_softc *); #define VIRTIO_PCI_LEGACY_CONFIG(_sc) \ VIRTIO_PCI_CONFIG_OFF(vtpci_is_msix_enabled(&(_sc)->vtpci_common)) #define vtpci_legacy_read_config_1(sc, o) \ bus_read_1((sc)->vtpci_res, (o)) #define vtpci_legacy_write_config_1(sc, o, v) \ bus_write_1((sc)->vtpci_res, (o), (v)) /* * VirtIO specifies that PCI Configuration area is guest endian. However, * since PCI devices are inherently little-endian, on big-endian systems * the bus layer transparently converts it to BE. For virtio-legacy, this * conversion is undesired, so an extra byte swap is required to fix it. 
*/ #define vtpci_legacy_read_config_2(sc, o) \ le16toh(bus_read_2((sc)->vtpci_res, (o))) #define vtpci_legacy_read_config_4(sc, o) \ le32toh(bus_read_4((sc)->vtpci_res, (o))) #define vtpci_legacy_write_config_2(sc, o, v) \ bus_write_2((sc)->vtpci_res, (o), (htole16(v))) #define vtpci_legacy_write_config_4(sc, o, v) \ bus_write_4((sc)->vtpci_res, (o), (htole32(v))) /* PCI Header LE. On BE systems the bus layer takes care of byte swapping. */ #define vtpci_legacy_read_header_2(sc, o) \ bus_read_2((sc)->vtpci_res, (o)) #define vtpci_legacy_read_header_4(sc, o) \ bus_read_4((sc)->vtpci_res, (o)) #define vtpci_legacy_write_header_2(sc, o, v) \ bus_write_2((sc)->vtpci_res, (o), (v)) #define vtpci_legacy_write_header_4(sc, o, v) \ bus_write_4((sc)->vtpci_res, (o), (v)) static device_method_t vtpci_legacy_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, vtpci_legacy_probe), DEVMETHOD(device_attach, vtpci_legacy_attach), DEVMETHOD(device_detach, vtpci_legacy_detach), DEVMETHOD(device_suspend, vtpci_legacy_suspend), DEVMETHOD(device_resume, vtpci_legacy_resume), DEVMETHOD(device_shutdown, vtpci_legacy_shutdown), /* Bus interface. */ DEVMETHOD(bus_driver_added, vtpci_legacy_driver_added), DEVMETHOD(bus_child_detached, vtpci_legacy_child_detached), - DEVMETHOD(bus_child_pnpinfo_str, virtio_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, virtio_child_pnpinfo), DEVMETHOD(bus_read_ivar, vtpci_legacy_read_ivar), DEVMETHOD(bus_write_ivar, vtpci_legacy_write_ivar), /* VirtIO PCI interface. */ DEVMETHOD(virtio_pci_read_isr, vtpci_legacy_read_isr), DEVMETHOD(virtio_pci_get_vq_size, vtpci_legacy_get_vq_size), DEVMETHOD(virtio_pci_get_vq_notify_off, vtpci_legacy_get_vq_notify_off), DEVMETHOD(virtio_pci_set_vq, vtpci_legacy_set_vq), DEVMETHOD(virtio_pci_disable_vq, vtpci_legacy_disable_vq), DEVMETHOD(virtio_pci_register_cfg_msix, vtpci_legacy_register_cfg_msix), DEVMETHOD(virtio_pci_register_vq_msix, vtpci_legacy_register_vq_msix), /* VirtIO bus interface. */ DEVMETHOD(virtio_bus_negotiate_features, vtpci_legacy_negotiate_features), DEVMETHOD(virtio_bus_with_feature, vtpci_legacy_with_feature), DEVMETHOD(virtio_bus_alloc_virtqueues, vtpci_legacy_alloc_virtqueues), DEVMETHOD(virtio_bus_setup_intr, vtpci_legacy_setup_interrupts), DEVMETHOD(virtio_bus_stop, vtpci_legacy_stop), DEVMETHOD(virtio_bus_reinit, vtpci_legacy_reinit), DEVMETHOD(virtio_bus_reinit_complete, vtpci_legacy_reinit_complete), DEVMETHOD(virtio_bus_notify_vq, vtpci_legacy_notify_vq), DEVMETHOD(virtio_bus_read_device_config, vtpci_legacy_read_dev_config), DEVMETHOD(virtio_bus_write_device_config, vtpci_legacy_write_dev_config), DEVMETHOD_END }; static driver_t vtpci_legacy_driver = { .name = "virtio_pci", .methods = vtpci_legacy_methods, .size = sizeof(struct vtpci_legacy_softc) }; devclass_t vtpci_legacy_devclass; DRIVER_MODULE(virtio_pci_legacy, pci, vtpci_legacy_driver, vtpci_legacy_devclass, 0, 0); static int vtpci_legacy_probe(device_t dev) { char desc[64]; const char *name; if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID) return (ENXIO); if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN || pci_get_device(dev) > VIRTIO_PCI_DEVICEID_LEGACY_MAX) return (ENXIO); if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION) return (ENXIO); name = virtio_device_name(pci_get_subdevice(dev)); if (name == NULL) name = "Unknown"; snprintf(desc, sizeof(desc), "VirtIO PCI (legacy) %s adapter", name); device_set_desc_copy(dev, desc); /* Prefer transitional modern VirtIO PCI. 
*/ return (BUS_PROBE_LOW_PRIORITY); } static int vtpci_legacy_attach(device_t dev) { struct vtpci_legacy_softc *sc; int error; sc = device_get_softc(dev); sc->vtpci_dev = dev; vtpci_init(&sc->vtpci_common, dev, false); error = vtpci_legacy_alloc_resources(sc); if (error) { device_printf(dev, "cannot map I/O or memory space\n"); return (error); } if (vtpci_is_msix_available(&sc->vtpci_common) && !vtpci_legacy_setup_msix(sc)) { device_printf(dev, "cannot set up MSI-X resources\n"); error = ENXIO; goto fail; } vtpci_legacy_reset(sc); /* Tell the host we've noticed this device. */ vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_ACK); error = vtpci_add_child(&sc->vtpci_common); if (error) goto fail; vtpci_legacy_probe_and_attach_child(sc); return (0); fail: vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED); vtpci_legacy_detach(dev); return (error); } static int vtpci_legacy_detach(device_t dev) { struct vtpci_legacy_softc *sc; int error; sc = device_get_softc(dev); error = vtpci_delete_child(&sc->vtpci_common); if (error) return (error); vtpci_legacy_reset(sc); vtpci_legacy_teardown_msix(sc); vtpci_legacy_free_resources(sc); return (0); } static int vtpci_legacy_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int vtpci_legacy_resume(device_t dev) { return (bus_generic_resume(dev)); } static int vtpci_legacy_shutdown(device_t dev) { (void) bus_generic_shutdown(dev); /* Forcibly stop the host device. */ vtpci_legacy_stop(dev); return (0); } static void vtpci_legacy_driver_added(device_t dev, driver_t *driver) { vtpci_legacy_probe_and_attach_child(device_get_softc(dev)); } static void vtpci_legacy_child_detached(device_t dev, device_t child) { struct vtpci_legacy_softc *sc; sc = device_get_softc(dev); vtpci_legacy_reset(sc); vtpci_child_detached(&sc->vtpci_common); /* After the reset, retell the host we've noticed this device. 
*/ vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_ACK); } static int vtpci_legacy_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct vtpci_legacy_softc *sc; struct vtpci_common *cn; sc = device_get_softc(dev); cn = &sc->vtpci_common; if (vtpci_child_device(cn) != child) return (ENOENT); switch (index) { case VIRTIO_IVAR_DEVTYPE: *result = pci_get_subdevice(dev); break; default: return (vtpci_read_ivar(cn, index, result)); } return (0); } static int vtpci_legacy_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct vtpci_legacy_softc *sc; struct vtpci_common *cn; sc = device_get_softc(dev); cn = &sc->vtpci_common; if (vtpci_child_device(cn) != child) return (ENOENT); switch (index) { default: return (vtpci_write_ivar(cn, index, value)); } return (0); } static uint64_t vtpci_legacy_negotiate_features(device_t dev, uint64_t child_features) { struct vtpci_legacy_softc *sc; uint64_t host_features, features; sc = device_get_softc(dev); host_features = vtpci_legacy_read_header_4(sc, VIRTIO_PCI_HOST_FEATURES); features = vtpci_negotiate_features(&sc->vtpci_common, child_features, host_features); vtpci_legacy_write_header_4(sc, VIRTIO_PCI_GUEST_FEATURES, features); return (features); } static int vtpci_legacy_with_feature(device_t dev, uint64_t feature) { struct vtpci_legacy_softc *sc; sc = device_get_softc(dev); return (vtpci_with_feature(&sc->vtpci_common, feature)); } static int vtpci_legacy_alloc_virtqueues(device_t dev, int flags, int nvqs, struct vq_alloc_info *vq_info) { struct vtpci_legacy_softc *sc; struct vtpci_common *cn; sc = device_get_softc(dev); cn = &sc->vtpci_common; return (vtpci_alloc_virtqueues(cn, flags, nvqs, vq_info)); } static int vtpci_legacy_setup_interrupts(device_t dev, enum intr_type type) { struct vtpci_legacy_softc *sc; sc = device_get_softc(dev); return (vtpci_setup_interrupts(&sc->vtpci_common, type)); } static void vtpci_legacy_stop(device_t dev) { vtpci_legacy_reset(device_get_softc(dev)); } static int vtpci_legacy_reinit(device_t dev, uint64_t features) { struct vtpci_legacy_softc *sc; struct vtpci_common *cn; int error; sc = device_get_softc(dev); cn = &sc->vtpci_common; /* * Redrive the device initialization. This is a bit of an abuse of * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to * play nice. * * We do not allow the host device to change from what was originally * negotiated beyond what the guest driver changed. MSIX state should * not change, number of virtqueues and their size remain the same, etc. * This will need to be rethought when we want to support migration. */ if (vtpci_legacy_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET) vtpci_legacy_stop(dev); /* * Quickly drive the status through ACK and DRIVER. The device does * not become usable again until DRIVER_OK in reinit complete. 
*/ vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_ACK); vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER); vtpci_legacy_negotiate_features(dev, features); error = vtpci_reinit(cn); if (error) return (error); return (0); } static void vtpci_legacy_reinit_complete(device_t dev) { struct vtpci_legacy_softc *sc; sc = device_get_softc(dev); vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK); } static void vtpci_legacy_notify_vq(device_t dev, uint16_t queue, bus_size_t offset) { struct vtpci_legacy_softc *sc; sc = device_get_softc(dev); MPASS(offset == VIRTIO_PCI_QUEUE_NOTIFY); vtpci_legacy_write_header_2(sc, offset, queue); } static uint8_t vtpci_legacy_get_status(struct vtpci_legacy_softc *sc) { return (vtpci_legacy_read_config_1(sc, VIRTIO_PCI_STATUS)); } static void vtpci_legacy_set_status(struct vtpci_legacy_softc *sc, uint8_t status) { if (status != VIRTIO_CONFIG_STATUS_RESET) status |= vtpci_legacy_get_status(sc); vtpci_legacy_write_config_1(sc, VIRTIO_PCI_STATUS, status); } static void vtpci_legacy_read_dev_config(device_t dev, bus_size_t offset, void *dst, int length) { struct vtpci_legacy_softc *sc; bus_size_t off; uint8_t *d; int size; sc = device_get_softc(dev); off = VIRTIO_PCI_LEGACY_CONFIG(sc) + offset; for (d = dst; length > 0; d += size, off += size, length -= size) { if (length >= 4) { size = 4; *(uint32_t *)d = vtpci_legacy_read_config_4(sc, off); } else if (length >= 2) { size = 2; *(uint16_t *)d = vtpci_legacy_read_config_2(sc, off); } else { size = 1; *d = vtpci_legacy_read_config_1(sc, off); } } } static void vtpci_legacy_write_dev_config(device_t dev, bus_size_t offset, void *src, int length) { struct vtpci_legacy_softc *sc; bus_size_t off; uint8_t *s; int size; sc = device_get_softc(dev); off = VIRTIO_PCI_LEGACY_CONFIG(sc) + offset; for (s = src; length > 0; s += size, off += size, length -= size) { if (length >= 4) { size = 4; vtpci_legacy_write_config_4(sc, off, *(uint32_t *)s); } else if (length >= 2) { size = 2; vtpci_legacy_write_config_2(sc, off, *(uint16_t *)s); } else { size = 1; vtpci_legacy_write_config_1(sc, off, *s); } } } static bool vtpci_legacy_setup_msix(struct vtpci_legacy_softc *sc) { device_t dev; int rid, table_rid; dev = sc->vtpci_dev; rid = table_rid = pci_msix_table_bar(dev); if (rid != PCIR_BAR(0)) { sc->vtpci_msix_table_res = bus_alloc_resource_any( dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->vtpci_msix_table_res == NULL) return (false); } rid = pci_msix_pba_bar(dev); if (rid != table_rid && rid != PCIR_BAR(0)) { sc->vtpci_msix_pba_res = bus_alloc_resource_any( dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->vtpci_msix_pba_res == NULL) return (false); } return (true); } static void vtpci_legacy_teardown_msix(struct vtpci_legacy_softc *sc) { device_t dev; dev = sc->vtpci_dev; if (sc->vtpci_msix_pba_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->vtpci_msix_pba_res), sc->vtpci_msix_pba_res); sc->vtpci_msix_pba_res = NULL; } if (sc->vtpci_msix_table_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->vtpci_msix_table_res), sc->vtpci_msix_table_res); sc->vtpci_msix_table_res = NULL; } } static int vtpci_legacy_alloc_resources(struct vtpci_legacy_softc *sc) { const int res_types[] = { SYS_RES_IOPORT, SYS_RES_MEMORY }; device_t dev; int rid, i; dev = sc->vtpci_dev; /* * Most hypervisors export the common configuration structure in IO * space, but some use memory space; try both. 
for (i = 0; i < nitems(res_types); i++) { rid = PCIR_BAR(0); sc->vtpci_res_type = res_types[i]; sc->vtpci_res = bus_alloc_resource_any(dev, res_types[i], &rid, RF_ACTIVE); if (sc->vtpci_res != NULL) break; } if (sc->vtpci_res == NULL) return (ENXIO); return (0); } static void vtpci_legacy_free_resources(struct vtpci_legacy_softc *sc) { device_t dev; dev = sc->vtpci_dev; if (sc->vtpci_res != NULL) { bus_release_resource(dev, sc->vtpci_res_type, PCIR_BAR(0), sc->vtpci_res); sc->vtpci_res = NULL; } } static void vtpci_legacy_probe_and_attach_child(struct vtpci_legacy_softc *sc) { device_t dev, child; dev = sc->vtpci_dev; child = vtpci_child_device(&sc->vtpci_common); if (child == NULL || device_get_state(child) != DS_NOTPRESENT) return; if (device_probe(child) != 0) return; vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER); if (device_attach(child) != 0) { vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED); /* Reset status for future attempt. */ vtpci_legacy_child_detached(dev, child); } else { vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK); VIRTIO_ATTACH_COMPLETED(child); } } static int vtpci_legacy_register_msix(struct vtpci_legacy_softc *sc, int offset, struct vtpci_interrupt *intr) { device_t dev; uint16_t vector; dev = sc->vtpci_dev; if (intr != NULL) { /* Map from guest rid to host vector. */ vector = intr->vti_rid - 1; } else vector = VIRTIO_MSI_NO_VECTOR; vtpci_legacy_write_header_2(sc, offset, vector); return (vtpci_legacy_read_header_2(sc, offset) == vector ? 0 : ENODEV); } static int vtpci_legacy_register_cfg_msix(device_t dev, struct vtpci_interrupt *intr) { struct vtpci_legacy_softc *sc; int error; sc = device_get_softc(dev); error = vtpci_legacy_register_msix(sc, VIRTIO_MSI_CONFIG_VECTOR, intr); if (error) { device_printf(dev, "unable to register config MSIX interrupt\n"); return (error); } return (0); } static int vtpci_legacy_register_vq_msix(device_t dev, int idx, struct vtpci_interrupt *intr) { struct vtpci_legacy_softc *sc; int error; sc = device_get_softc(dev); vtpci_legacy_select_virtqueue(sc, idx); error = vtpci_legacy_register_msix(sc, VIRTIO_MSI_QUEUE_VECTOR, intr); if (error) { device_printf(dev, "unable to register virtqueue MSIX interrupt\n"); return (error); } return (0); } static void vtpci_legacy_reset(struct vtpci_legacy_softc *sc) { /* * Setting the status to RESET sets the host device to the * original, uninitialized state. 
*/ vtpci_legacy_set_status(sc, VIRTIO_CONFIG_STATUS_RESET); (void) vtpci_legacy_get_status(sc); } static void vtpci_legacy_select_virtqueue(struct vtpci_legacy_softc *sc, int idx) { vtpci_legacy_write_header_2(sc, VIRTIO_PCI_QUEUE_SEL, idx); } static uint8_t vtpci_legacy_read_isr(device_t dev) { struct vtpci_legacy_softc *sc; sc = device_get_softc(dev); return (vtpci_legacy_read_config_1(sc, VIRTIO_PCI_ISR)); } static uint16_t vtpci_legacy_get_vq_size(device_t dev, int idx) { struct vtpci_legacy_softc *sc; sc = device_get_softc(dev); vtpci_legacy_select_virtqueue(sc, idx); return (vtpci_legacy_read_header_2(sc, VIRTIO_PCI_QUEUE_NUM)); } static bus_size_t vtpci_legacy_get_vq_notify_off(device_t dev, int idx) { return (VIRTIO_PCI_QUEUE_NOTIFY); } static void vtpci_legacy_set_vq(device_t dev, struct virtqueue *vq) { struct vtpci_legacy_softc *sc; sc = device_get_softc(dev); vtpci_legacy_select_virtqueue(sc, virtqueue_index(vq)); vtpci_legacy_write_header_4(sc, VIRTIO_PCI_QUEUE_PFN, virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); } static void vtpci_legacy_disable_vq(device_t dev, int idx) { struct vtpci_legacy_softc *sc; sc = device_get_softc(dev); vtpci_legacy_select_virtqueue(sc, idx); vtpci_legacy_write_header_4(sc, VIRTIO_PCI_QUEUE_PFN, 0); } diff --git a/sys/dev/virtio/pci/virtio_pci_modern.c b/sys/dev/virtio/pci/virtio_pci_modern.c index 33fdebf19402..201d2b79f7ca 100644 --- a/sys/dev/virtio/pci/virtio_pci_modern.c +++ b/sys/dev/virtio/pci/virtio_pci_modern.c @@ -1,1450 +1,1450 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2017, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for the modern VirtIO PCI interface. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_bus_if.h" #include "virtio_pci_if.h" #include "virtio_if.h" struct vtpci_modern_resource_map { struct resource_map vtrm_map; int vtrm_cap_offset; int vtrm_bar; int vtrm_offset; int vtrm_length; int vtrm_type; /* SYS_RES_{MEMORY, IOPORT} */ }; struct vtpci_modern_bar_resource { struct resource *vtbr_res; int vtbr_type; }; struct vtpci_modern_softc { device_t vtpci_dev; struct vtpci_common vtpci_common; uint32_t vtpci_notify_offset_multiplier; uint16_t vtpci_devid; int vtpci_msix_bar; struct resource *vtpci_msix_res; struct vtpci_modern_resource_map vtpci_common_res_map; struct vtpci_modern_resource_map vtpci_notify_res_map; struct vtpci_modern_resource_map vtpci_isr_res_map; struct vtpci_modern_resource_map vtpci_device_res_map; #define VTPCI_MODERN_MAX_BARS 6 struct vtpci_modern_bar_resource vtpci_bar_res[VTPCI_MODERN_MAX_BARS]; }; static int vtpci_modern_probe(device_t); static int vtpci_modern_attach(device_t); static int vtpci_modern_detach(device_t); static int vtpci_modern_suspend(device_t); static int vtpci_modern_resume(device_t); static int vtpci_modern_shutdown(device_t); static void vtpci_modern_driver_added(device_t, driver_t *); static void vtpci_modern_child_detached(device_t, device_t); static int vtpci_modern_read_ivar(device_t, device_t, int, uintptr_t *); static int vtpci_modern_write_ivar(device_t, device_t, int, uintptr_t); static uint8_t vtpci_modern_read_isr(device_t); static uint16_t vtpci_modern_get_vq_size(device_t, int); static bus_size_t vtpci_modern_get_vq_notify_off(device_t, int); static void vtpci_modern_set_vq(device_t, struct virtqueue *); static void vtpci_modern_disable_vq(device_t, int); static int vtpci_modern_register_msix(struct vtpci_modern_softc *, int, struct vtpci_interrupt *); static int vtpci_modern_register_cfg_msix(device_t, struct vtpci_interrupt *); static int vtpci_modern_register_vq_msix(device_t, int idx, struct vtpci_interrupt *); static uint64_t vtpci_modern_negotiate_features(device_t, uint64_t); static int vtpci_modern_finalize_features(device_t); static int vtpci_modern_with_feature(device_t, uint64_t); static int vtpci_modern_alloc_virtqueues(device_t, int, int, struct vq_alloc_info *); static int vtpci_modern_setup_interrupts(device_t, enum intr_type); static void vtpci_modern_stop(device_t); static int vtpci_modern_reinit(device_t, uint64_t); static void vtpci_modern_reinit_complete(device_t); static void vtpci_modern_notify_vq(device_t, uint16_t, bus_size_t); static int vtpci_modern_config_generation(device_t); static void vtpci_modern_read_dev_config(device_t, bus_size_t, void *, int); static void vtpci_modern_write_dev_config(device_t, bus_size_t, void *, int); static int vtpci_modern_probe_configs(device_t); static int vtpci_modern_find_cap(device_t, uint8_t, int *); static int vtpci_modern_map_configs(struct vtpci_modern_softc *); static void vtpci_modern_unmap_configs(struct vtpci_modern_softc *); static int vtpci_modern_find_cap_resource(struct vtpci_modern_softc *, uint8_t, int, int, struct vtpci_modern_resource_map *); static int vtpci_modern_bar_type(struct vtpci_modern_softc *, int); static struct resource *vtpci_modern_get_bar_resource( struct vtpci_modern_softc *, int, int); static struct resource *vtpci_modern_alloc_bar_resource( struct vtpci_modern_softc *, int, int); static void 
vtpci_modern_free_bar_resources(struct vtpci_modern_softc *); static int vtpci_modern_alloc_resource_map(struct vtpci_modern_softc *, struct vtpci_modern_resource_map *); static void vtpci_modern_free_resource_map(struct vtpci_modern_softc *, struct vtpci_modern_resource_map *); static void vtpci_modern_alloc_msix_resource(struct vtpci_modern_softc *); static void vtpci_modern_free_msix_resource(struct vtpci_modern_softc *); static void vtpci_modern_probe_and_attach_child(struct vtpci_modern_softc *); static uint64_t vtpci_modern_read_features(struct vtpci_modern_softc *); static void vtpci_modern_write_features(struct vtpci_modern_softc *, uint64_t); static void vtpci_modern_select_virtqueue(struct vtpci_modern_softc *, int); static uint8_t vtpci_modern_get_status(struct vtpci_modern_softc *); static void vtpci_modern_set_status(struct vtpci_modern_softc *, uint8_t); static void vtpci_modern_reset(struct vtpci_modern_softc *); static void vtpci_modern_enable_virtqueues(struct vtpci_modern_softc *); static uint8_t vtpci_modern_read_common_1(struct vtpci_modern_softc *, bus_size_t); static uint16_t vtpci_modern_read_common_2(struct vtpci_modern_softc *, bus_size_t); static uint32_t vtpci_modern_read_common_4(struct vtpci_modern_softc *, bus_size_t); static void vtpci_modern_write_common_1(struct vtpci_modern_softc *, bus_size_t, uint8_t); static void vtpci_modern_write_common_2(struct vtpci_modern_softc *, bus_size_t, uint16_t); static void vtpci_modern_write_common_4(struct vtpci_modern_softc *, bus_size_t, uint32_t); static void vtpci_modern_write_common_8(struct vtpci_modern_softc *, bus_size_t, uint64_t); static void vtpci_modern_write_notify_2(struct vtpci_modern_softc *, bus_size_t, uint16_t); static uint8_t vtpci_modern_read_isr_1(struct vtpci_modern_softc *, bus_size_t); static uint8_t vtpci_modern_read_device_1(struct vtpci_modern_softc *, bus_size_t); static uint16_t vtpci_modern_read_device_2(struct vtpci_modern_softc *, bus_size_t); static uint32_t vtpci_modern_read_device_4(struct vtpci_modern_softc *, bus_size_t); static uint64_t vtpci_modern_read_device_8(struct vtpci_modern_softc *, bus_size_t); static void vtpci_modern_write_device_1(struct vtpci_modern_softc *, bus_size_t, uint8_t); static void vtpci_modern_write_device_2(struct vtpci_modern_softc *, bus_size_t, uint16_t); static void vtpci_modern_write_device_4(struct vtpci_modern_softc *, bus_size_t, uint32_t); static void vtpci_modern_write_device_8(struct vtpci_modern_softc *, bus_size_t, uint64_t); /* Tunables. */ static int vtpci_modern_transitional = 0; TUNABLE_INT("hw.virtio.pci.transitional", &vtpci_modern_transitional); static device_method_t vtpci_modern_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, vtpci_modern_probe), DEVMETHOD(device_attach, vtpci_modern_attach), DEVMETHOD(device_detach, vtpci_modern_detach), DEVMETHOD(device_suspend, vtpci_modern_suspend), DEVMETHOD(device_resume, vtpci_modern_resume), DEVMETHOD(device_shutdown, vtpci_modern_shutdown), /* Bus interface. */ DEVMETHOD(bus_driver_added, vtpci_modern_driver_added), DEVMETHOD(bus_child_detached, vtpci_modern_child_detached), - DEVMETHOD(bus_child_pnpinfo_str, virtio_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, virtio_child_pnpinfo), DEVMETHOD(bus_read_ivar, vtpci_modern_read_ivar), DEVMETHOD(bus_write_ivar, vtpci_modern_write_ivar), /* VirtIO PCI interface. 
*/ DEVMETHOD(virtio_pci_read_isr, vtpci_modern_read_isr), DEVMETHOD(virtio_pci_get_vq_size, vtpci_modern_get_vq_size), DEVMETHOD(virtio_pci_get_vq_notify_off, vtpci_modern_get_vq_notify_off), DEVMETHOD(virtio_pci_set_vq, vtpci_modern_set_vq), DEVMETHOD(virtio_pci_disable_vq, vtpci_modern_disable_vq), DEVMETHOD(virtio_pci_register_cfg_msix, vtpci_modern_register_cfg_msix), DEVMETHOD(virtio_pci_register_vq_msix, vtpci_modern_register_vq_msix), /* VirtIO bus interface. */ DEVMETHOD(virtio_bus_negotiate_features, vtpci_modern_negotiate_features), DEVMETHOD(virtio_bus_finalize_features, vtpci_modern_finalize_features), DEVMETHOD(virtio_bus_with_feature, vtpci_modern_with_feature), DEVMETHOD(virtio_bus_alloc_virtqueues, vtpci_modern_alloc_virtqueues), DEVMETHOD(virtio_bus_setup_intr, vtpci_modern_setup_interrupts), DEVMETHOD(virtio_bus_stop, vtpci_modern_stop), DEVMETHOD(virtio_bus_reinit, vtpci_modern_reinit), DEVMETHOD(virtio_bus_reinit_complete, vtpci_modern_reinit_complete), DEVMETHOD(virtio_bus_notify_vq, vtpci_modern_notify_vq), DEVMETHOD(virtio_bus_config_generation, vtpci_modern_config_generation), DEVMETHOD(virtio_bus_read_device_config, vtpci_modern_read_dev_config), DEVMETHOD(virtio_bus_write_device_config, vtpci_modern_write_dev_config), DEVMETHOD_END }; static driver_t vtpci_modern_driver = { .name = "virtio_pci", .methods = vtpci_modern_methods, .size = sizeof(struct vtpci_modern_softc) }; devclass_t vtpci_modern_devclass; DRIVER_MODULE(virtio_pci_modern, pci, vtpci_modern_driver, vtpci_modern_devclass, 0, 0); static int vtpci_modern_probe(device_t dev) { char desc[64]; const char *name; uint16_t devid; if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID) return (ENXIO); if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN || pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MODERN_MAX) return (ENXIO); if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MODERN_MIN) { if (!vtpci_modern_transitional) return (ENXIO); devid = pci_get_subdevice(dev); } else devid = pci_get_device(dev) - VIRTIO_PCI_DEVICEID_MODERN_MIN; if (vtpci_modern_probe_configs(dev) != 0) return (ENXIO); name = virtio_device_name(devid); if (name == NULL) name = "Unknown"; snprintf(desc, sizeof(desc), "VirtIO PCI (modern) %s adapter", name); device_set_desc_copy(dev, desc); return (BUS_PROBE_DEFAULT); } static int vtpci_modern_attach(device_t dev) { struct vtpci_modern_softc *sc; int error; sc = device_get_softc(dev); sc->vtpci_dev = dev; vtpci_init(&sc->vtpci_common, dev, true); if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MODERN_MIN) sc->vtpci_devid = pci_get_subdevice(dev); else sc->vtpci_devid = pci_get_device(dev) - VIRTIO_PCI_DEVICEID_MODERN_MIN; error = vtpci_modern_map_configs(sc); if (error) { device_printf(dev, "cannot map configs\n"); vtpci_modern_unmap_configs(sc); return (error); } vtpci_modern_reset(sc); /* Tell the host we've noticed this device. 
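/*
 * The probe above derives the VirtIO device type from the PCI device
 * ID. Per the VirtIO 1.0 spec, modern devices use ID 0x1040 + type,
 * so for example a modern virtio-net device (type 1) probes with PCI
 * device ID 0x1041 and the subtraction yields devid 1, which
 * virtio_device_name() maps to "Network".
 */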
*/ vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK); error = vtpci_add_child(&sc->vtpci_common); if (error) goto fail; vtpci_modern_probe_and_attach_child(sc); return (0); fail: vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED); vtpci_modern_detach(dev); return (error); } static int vtpci_modern_detach(device_t dev) { struct vtpci_modern_softc *sc; int error; sc = device_get_softc(dev); error = vtpci_delete_child(&sc->vtpci_common); if (error) return (error); vtpci_modern_reset(sc); vtpci_modern_unmap_configs(sc); return (0); } static int vtpci_modern_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int vtpci_modern_resume(device_t dev) { return (bus_generic_resume(dev)); } static int vtpci_modern_shutdown(device_t dev) { (void) bus_generic_shutdown(dev); /* Forcibly stop the host device. */ vtpci_modern_stop(dev); return (0); } static void vtpci_modern_driver_added(device_t dev, driver_t *driver) { vtpci_modern_probe_and_attach_child(device_get_softc(dev)); } static void vtpci_modern_child_detached(device_t dev, device_t child) { struct vtpci_modern_softc *sc; sc = device_get_softc(dev); vtpci_modern_reset(sc); vtpci_child_detached(&sc->vtpci_common); /* After the reset, retell the host we've noticed this device. */ vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK); } static int vtpci_modern_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct vtpci_modern_softc *sc; struct vtpci_common *cn; sc = device_get_softc(dev); cn = &sc->vtpci_common; if (vtpci_child_device(cn) != child) return (ENOENT); switch (index) { case VIRTIO_IVAR_DEVTYPE: *result = sc->vtpci_devid; break; default: return (vtpci_read_ivar(cn, index, result)); } return (0); } static int vtpci_modern_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct vtpci_modern_softc *sc; struct vtpci_common *cn; sc = device_get_softc(dev); cn = &sc->vtpci_common; if (vtpci_child_device(cn) != child) return (ENOENT); switch (index) { default: return (vtpci_write_ivar(cn, index, value)); } return (0); } static uint64_t vtpci_modern_negotiate_features(device_t dev, uint64_t child_features) { struct vtpci_modern_softc *sc; uint64_t host_features, features; sc = device_get_softc(dev); host_features = vtpci_modern_read_features(sc); /* * Since the driver was added as a child of the modern PCI bus, * always add the V1 flag. */ child_features |= VIRTIO_F_VERSION_1; features = vtpci_negotiate_features(&sc->vtpci_common, child_features, host_features); vtpci_modern_write_features(sc, features); return (features); } static int vtpci_modern_finalize_features(device_t dev) { struct vtpci_modern_softc *sc; uint8_t status; sc = device_get_softc(dev); /* * Must re-read the status after setting it to verify the negotiated * features were accepted by the device. 
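/*
 * For orientation, attach and the feature methods together drive the
 * VirtIO 1.0 device initialization handshake. A condensed sketch
 * using this file's helpers (the wrapper function and the simplified
 * negotiation are illustrative only; the real driver spreads these
 * steps across attach, negotiate, and finalize):
 */
static int
vtpci_modern_handshake_sketch(struct vtpci_modern_softc *sc,
    uint64_t driver_features)
{
	vtpci_modern_reset(sc);					   /* 1. reset */
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK);	   /* 2. device noticed */
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER); /* 3. driver found */
	/* 4. Offer the intersection of driver and device features. */
	vtpci_modern_write_features(sc,
	    driver_features & vtpci_modern_read_features(sc));
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_S_FEATURES_OK);
	/* 5. Re-read to confirm the device accepted the feature set. */
	if ((vtpci_modern_get_status(sc) & VIRTIO_CONFIG_S_FEATURES_OK) == 0)
		return (ENOTSUP);
	/* 6. ... virtqueue and interrupt setup happens here ... */
	vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK);
	return (0);
}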
*/ vtpci_modern_set_status(sc, VIRTIO_CONFIG_S_FEATURES_OK); status = vtpci_modern_get_status(sc); if ((status & VIRTIO_CONFIG_S_FEATURES_OK) == 0) { device_printf(dev, "desired features were not accepted\n"); return (ENOTSUP); } return (0); } static int vtpci_modern_with_feature(device_t dev, uint64_t feature) { struct vtpci_modern_softc *sc; sc = device_get_softc(dev); return (vtpci_with_feature(&sc->vtpci_common, feature)); } static uint64_t vtpci_modern_read_features(struct vtpci_modern_softc *sc) { uint32_t features0, features1; vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_DFSELECT, 0); features0 = vtpci_modern_read_common_4(sc, VIRTIO_PCI_COMMON_DF); vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_DFSELECT, 1); features1 = vtpci_modern_read_common_4(sc, VIRTIO_PCI_COMMON_DF); return (((uint64_t) features1 << 32) | features0); } static void vtpci_modern_write_features(struct vtpci_modern_softc *sc, uint64_t features) { uint32_t features0, features1; features0 = features; features1 = features >> 32; vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GFSELECT, 0); vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GF, features0); vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GFSELECT, 1); vtpci_modern_write_common_4(sc, VIRTIO_PCI_COMMON_GF, features1); } static int vtpci_modern_alloc_virtqueues(device_t dev, int flags, int nvqs, struct vq_alloc_info *vq_info) { struct vtpci_modern_softc *sc; struct vtpci_common *cn; uint16_t max_nvqs; sc = device_get_softc(dev); cn = &sc->vtpci_common; max_nvqs = vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_NUMQ); if (nvqs > max_nvqs) { device_printf(sc->vtpci_dev, "requested virtqueue count %d " "exceeds max %d\n", nvqs, max_nvqs); return (E2BIG); } return (vtpci_alloc_virtqueues(cn, flags, nvqs, vq_info)); } static int vtpci_modern_setup_interrupts(device_t dev, enum intr_type type) { struct vtpci_modern_softc *sc; int error; sc = device_get_softc(dev); error = vtpci_setup_interrupts(&sc->vtpci_common, type); if (error == 0) vtpci_modern_enable_virtqueues(sc); return (error); } static void vtpci_modern_stop(device_t dev) { vtpci_modern_reset(device_get_softc(dev)); } static int vtpci_modern_reinit(device_t dev, uint64_t features) { struct vtpci_modern_softc *sc; struct vtpci_common *cn; int error; sc = device_get_softc(dev); cn = &sc->vtpci_common; /* * Redrive the device initialization. This is a bit of an abuse of * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to * play nice. * * We do not allow the host device to change from what was originally * negotiated beyond what the guest driver changed. MSIX state should * not change, number of virtqueues and their size remain the same, etc. * This will need to be rethought when we want to support migration. */ if (vtpci_modern_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET) vtpci_modern_stop(dev); /* * Quickly drive the status through ACK and DRIVER. The device does * not become usable again until DRIVER_OK in reinit complete. */ vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_ACK); vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER); /* * TODO: Check that features are not added as to what was * originally negotiated. 
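/*
 * One way the TODO above could be addressed (illustrative helper, not
 * part of this change): after renegotiation, no feature bit may
 * appear that was absent from the originally negotiated set.
 */
static int
vtpci_features_subset(uint64_t original, uint64_t renegotiated)
{
	/* True iff renegotiation added nothing new. */
	return ((renegotiated & ~original) == 0);
}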
*/ vtpci_modern_negotiate_features(dev, features); error = vtpci_modern_finalize_features(dev); if (error) { device_printf(dev, "cannot finalize features during reinit\n"); return (error); } error = vtpci_reinit(cn); if (error) return (error); return (0); } static void vtpci_modern_reinit_complete(device_t dev) { struct vtpci_modern_softc *sc; sc = device_get_softc(dev); vtpci_modern_enable_virtqueues(sc); vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK); } static void vtpci_modern_notify_vq(device_t dev, uint16_t queue, bus_size_t offset) { struct vtpci_modern_softc *sc; sc = device_get_softc(dev); vtpci_modern_write_notify_2(sc, offset, queue); } static uint8_t vtpci_modern_get_status(struct vtpci_modern_softc *sc) { return (vtpci_modern_read_common_1(sc, VIRTIO_PCI_COMMON_STATUS)); } static void vtpci_modern_set_status(struct vtpci_modern_softc *sc, uint8_t status) { if (status != VIRTIO_CONFIG_STATUS_RESET) status |= vtpci_modern_get_status(sc); vtpci_modern_write_common_1(sc, VIRTIO_PCI_COMMON_STATUS, status); } static int vtpci_modern_config_generation(device_t dev) { struct vtpci_modern_softc *sc; uint8_t gen; sc = device_get_softc(dev); gen = vtpci_modern_read_common_1(sc, VIRTIO_PCI_COMMON_CFGGENERATION); return (gen); } static void vtpci_modern_read_dev_config(device_t dev, bus_size_t offset, void *dst, int length) { struct vtpci_modern_softc *sc; sc = device_get_softc(dev); if (sc->vtpci_device_res_map.vtrm_map.r_size == 0) { panic("%s: attempt to read dev config but not present", __func__); } switch (length) { case 1: *(uint8_t *) dst = vtpci_modern_read_device_1(sc, offset); break; case 2: *(uint16_t *) dst = virtio_htog16(true, vtpci_modern_read_device_2(sc, offset)); break; case 4: *(uint32_t *) dst = virtio_htog32(true, vtpci_modern_read_device_4(sc, offset)); break; case 8: *(uint64_t *) dst = virtio_htog64(true, vtpci_modern_read_device_8(sc, offset)); break; default: panic("%s: device %s invalid device read length %d offset %d", __func__, device_get_nameunit(dev), length, (int) offset); } } static void vtpci_modern_write_dev_config(device_t dev, bus_size_t offset, void *src, int length) { struct vtpci_modern_softc *sc; sc = device_get_softc(dev); if (sc->vtpci_device_res_map.vtrm_map.r_size == 0) { panic("%s: attempt to write dev config but not present", __func__); } switch (length) { case 1: vtpci_modern_write_device_1(sc, offset, *(uint8_t *) src); break; case 2: { uint16_t val = virtio_gtoh16(true, *(uint16_t *) src); vtpci_modern_write_device_2(sc, offset, val); break; } case 4: { uint32_t val = virtio_gtoh32(true, *(uint32_t *) src); vtpci_modern_write_device_4(sc, offset, val); break; } case 8: { uint64_t val = virtio_gtoh64(true, *(uint64_t *) src); vtpci_modern_write_device_8(sc, offset, val); break; } default: panic("%s: device %s invalid device write length %d offset %d", __func__, device_get_nameunit(dev), length, (int) offset); } } static int vtpci_modern_probe_configs(device_t dev) { int error; /* * These config capabilities must be present. The DEVICE_CFG * capability is only present if the device requires it. 
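/*
 * For reference when following the pci_read_config() offsets below,
 * the vendor capability layout defined by the VirtIO 1.0 spec (the
 * driver gets this as struct virtio_pci_cap from the virtio PCI
 * headers) is:
 *
 *	struct virtio_pci_cap {
 *		uint8_t  cap_vndr;	(PCIY_VENDOR)
 *		uint8_t  cap_next;	(next capability pointer)
 *		uint8_t  cap_len;	(length of this capability)
 *		uint8_t  cfg_type;	(COMMON/NOTIFY/ISR/DEVICE/...)
 *		uint8_t  bar;		(which BAR holds the window)
 *		uint8_t  padding[3];
 *		uint32_t offset;	(window offset within the BAR, LE)
 *		uint32_t length;	(window length in bytes, LE)
 *	};
 */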
*/ error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_COMMON_CFG, NULL); if (error) { device_printf(dev, "cannot find COMMON_CFG capability\n"); return (error); } error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_NOTIFY_CFG, NULL); if (error) { device_printf(dev, "cannot find NOTIFY_CFG capability\n"); return (error); } error = vtpci_modern_find_cap(dev, VIRTIO_PCI_CAP_ISR_CFG, NULL); if (error) { device_printf(dev, "cannot find ISR_CFG capability\n"); return (error); } return (0); } static int vtpci_modern_find_cap(device_t dev, uint8_t cfg_type, int *cap_offset) { uint32_t type, bar; int capreg, error; for (error = pci_find_cap(dev, PCIY_VENDOR, &capreg); error == 0; error = pci_find_next_cap(dev, PCIY_VENDOR, capreg, &capreg)) { type = pci_read_config(dev, capreg + offsetof(struct virtio_pci_cap, cfg_type), 1); bar = pci_read_config(dev, capreg + offsetof(struct virtio_pci_cap, bar), 1); /* Must ignore reserved BARs. */ if (bar >= VTPCI_MODERN_MAX_BARS) continue; if (type == cfg_type) { if (cap_offset != NULL) *cap_offset = capreg; break; } } return (error); } static int vtpci_modern_map_common_config(struct vtpci_modern_softc *sc) { device_t dev; int error; dev = sc->vtpci_dev; error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_COMMON_CFG, sizeof(struct virtio_pci_common_cfg), 4, &sc->vtpci_common_res_map); if (error) { device_printf(dev, "cannot find cap COMMON_CFG resource\n"); return (error); } error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_common_res_map); if (error) { device_printf(dev, "cannot alloc resource for COMMON_CFG\n"); return (error); } return (0); } static int vtpci_modern_map_notify_config(struct vtpci_modern_softc *sc) { device_t dev; int cap_offset, error; dev = sc->vtpci_dev; error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_NOTIFY_CFG, -1, 2, &sc->vtpci_notify_res_map); if (error) { device_printf(dev, "cannot find cap NOTIFY_CFG resource\n"); return (error); } cap_offset = sc->vtpci_notify_res_map.vtrm_cap_offset; sc->vtpci_notify_offset_multiplier = pci_read_config(dev, cap_offset + offsetof(struct virtio_pci_notify_cap, notify_off_multiplier), 4); error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_notify_res_map); if (error) { device_printf(dev, "cannot alloc resource for NOTIFY_CFG\n"); return (error); } return (0); } static int vtpci_modern_map_isr_config(struct vtpci_modern_softc *sc) { device_t dev; int error; dev = sc->vtpci_dev; error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_ISR_CFG, sizeof(uint8_t), 1, &sc->vtpci_isr_res_map); if (error) { device_printf(dev, "cannot find cap ISR_CFG resource\n"); return (error); } error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_isr_res_map); if (error) { device_printf(dev, "cannot alloc resource for ISR_CFG\n"); return (error); } return (0); } static int vtpci_modern_map_device_config(struct vtpci_modern_softc *sc) { device_t dev; int error; dev = sc->vtpci_dev; error = vtpci_modern_find_cap_resource(sc, VIRTIO_PCI_CAP_DEVICE_CFG, -1, 4, &sc->vtpci_device_res_map); if (error == ENOENT) { /* Device configuration is optional depending on device. 
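/*
 * The notify_off_multiplier captured above scales each queue's
 * doorbell offset: notifications for queue N are written at
 *
 *	notify_window + queue_notify_off(N) * notify_off_multiplier
 *
 * For example, if queue 3 reports a queue_notify_off of 3 and the
 * multiplier is 4, vtpci_modern_get_vq_notify_off() returns byte
 * offset 12 within the NOTIFY_CFG window (the multiplier is
 * device-provided; the value 4 here is only illustrative).
 */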
*/ return (0); } else if (error) { device_printf(dev, "cannot find cap DEVICE_CFG resource\n"); return (error); } error = vtpci_modern_alloc_resource_map(sc, &sc->vtpci_device_res_map); if (error) { device_printf(dev, "cannot alloc resource for DEVICE_CFG\n"); return (error); } return (0); } static int vtpci_modern_map_configs(struct vtpci_modern_softc *sc) { int error; error = vtpci_modern_map_common_config(sc); if (error) return (error); error = vtpci_modern_map_notify_config(sc); if (error) return (error); error = vtpci_modern_map_isr_config(sc); if (error) return (error); error = vtpci_modern_map_device_config(sc); if (error) return (error); vtpci_modern_alloc_msix_resource(sc); return (0); } static void vtpci_modern_unmap_configs(struct vtpci_modern_softc *sc) { vtpci_modern_free_resource_map(sc, &sc->vtpci_common_res_map); vtpci_modern_free_resource_map(sc, &sc->vtpci_notify_res_map); vtpci_modern_free_resource_map(sc, &sc->vtpci_isr_res_map); vtpci_modern_free_resource_map(sc, &sc->vtpci_device_res_map); vtpci_modern_free_bar_resources(sc); vtpci_modern_free_msix_resource(sc); sc->vtpci_notify_offset_multiplier = 0; } static int vtpci_modern_find_cap_resource(struct vtpci_modern_softc *sc, uint8_t cfg_type, int min_size, int alignment, struct vtpci_modern_resource_map *res) { device_t dev; int cap_offset, offset, length, error; uint8_t bar, cap_length; dev = sc->vtpci_dev; error = vtpci_modern_find_cap(dev, cfg_type, &cap_offset); if (error) return (error); cap_length = pci_read_config(dev, cap_offset + offsetof(struct virtio_pci_cap, cap_len), 1); if (cap_length < sizeof(struct virtio_pci_cap)) { device_printf(dev, "cap %u length %d less than expected\n", cfg_type, cap_length); return (ENXIO); } bar = pci_read_config(dev, cap_offset + offsetof(struct virtio_pci_cap, bar), 1); offset = pci_read_config(dev, cap_offset + offsetof(struct virtio_pci_cap, offset), 4); length = pci_read_config(dev, cap_offset + offsetof(struct virtio_pci_cap, length), 4); if (min_size != -1 && length < min_size) { device_printf(dev, "cap %u struct length %d less than min %d\n", cfg_type, length, min_size); return (ENXIO); } if (offset % alignment) { device_printf(dev, "cap %u struct offset %d not aligned to %d\n", cfg_type, offset, alignment); return (ENXIO); } /* BMV: TODO Can we determine the size of the BAR here? */ res->vtrm_cap_offset = cap_offset; res->vtrm_bar = bar; res->vtrm_offset = offset; res->vtrm_length = length; res->vtrm_type = vtpci_modern_bar_type(sc, bar); return (0); } static int vtpci_modern_bar_type(struct vtpci_modern_softc *sc, int bar) { uint32_t val; /* * The BAR described by a config capability may be either an IOPORT or * MEM, but we must know the type when calling bus_alloc_resource(). 
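/*
 * Design note on the mapping code below: several capability windows
 * (common, notify, ISR, device) may live in the same BAR, so the BAR
 * is allocated once with RF_ACTIVE | RF_UNMAPPED and each window is
 * then mapped individually with bus_map_resource() using the
 * offset/length advertised by its capability. This lets the windows
 * share the underlying resource without mapping the entire BAR.
 */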
*/ val = pci_read_config(sc->vtpci_dev, PCIR_BAR(bar), 4); if (PCI_BAR_IO(val)) return (SYS_RES_IOPORT); else return (SYS_RES_MEMORY); } static struct resource * vtpci_modern_get_bar_resource(struct vtpci_modern_softc *sc, int bar, int type) { struct resource *res; MPASS(bar >= 0 && bar < VTPCI_MODERN_MAX_BARS); res = sc->vtpci_bar_res[bar].vtbr_res; MPASS(res == NULL || sc->vtpci_bar_res[bar].vtbr_type == type); return (res); } static struct resource * vtpci_modern_alloc_bar_resource(struct vtpci_modern_softc *sc, int bar, int type) { struct resource *res; int rid; MPASS(bar >= 0 && bar < VTPCI_MODERN_MAX_BARS); MPASS(type == SYS_RES_MEMORY || type == SYS_RES_IOPORT); res = sc->vtpci_bar_res[bar].vtbr_res; if (res != NULL) { MPASS(sc->vtpci_bar_res[bar].vtbr_type == type); return (res); } rid = PCIR_BAR(bar); res = bus_alloc_resource_any(sc->vtpci_dev, type, &rid, RF_ACTIVE | RF_UNMAPPED); if (res != NULL) { sc->vtpci_bar_res[bar].vtbr_res = res; sc->vtpci_bar_res[bar].vtbr_type = type; } return (res); } static void vtpci_modern_free_bar_resources(struct vtpci_modern_softc *sc) { device_t dev; struct resource *res; int bar, rid, type; dev = sc->vtpci_dev; for (bar = 0; bar < VTPCI_MODERN_MAX_BARS; bar++) { res = sc->vtpci_bar_res[bar].vtbr_res; type = sc->vtpci_bar_res[bar].vtbr_type; if (res != NULL) { rid = PCIR_BAR(bar); bus_release_resource(dev, type, rid, res); sc->vtpci_bar_res[bar].vtbr_res = NULL; sc->vtpci_bar_res[bar].vtbr_type = 0; } } } static int vtpci_modern_alloc_resource_map(struct vtpci_modern_softc *sc, struct vtpci_modern_resource_map *map) { struct resource_map_request req; struct resource *res; int type; type = map->vtrm_type; res = vtpci_modern_alloc_bar_resource(sc, map->vtrm_bar, type); if (res == NULL) return (ENXIO); resource_init_map_request(&req); req.offset = map->vtrm_offset; req.length = map->vtrm_length; return (bus_map_resource(sc->vtpci_dev, type, res, &req, &map->vtrm_map)); } static void vtpci_modern_free_resource_map(struct vtpci_modern_softc *sc, struct vtpci_modern_resource_map *map) { struct resource *res; int type; type = map->vtrm_type; res = vtpci_modern_get_bar_resource(sc, map->vtrm_bar, type); if (res != NULL && map->vtrm_map.r_size != 0) { bus_unmap_resource(sc->vtpci_dev, type, res, &map->vtrm_map); bzero(map, sizeof(struct vtpci_modern_resource_map)); } } static void vtpci_modern_alloc_msix_resource(struct vtpci_modern_softc *sc) { device_t dev; int bar; dev = sc->vtpci_dev; if (!vtpci_is_msix_available(&sc->vtpci_common) || (bar = pci_msix_table_bar(dev)) == -1) return; /* TODO: Can this BAR be in the 0-5 range? */ sc->vtpci_msix_bar = bar; if ((sc->vtpci_msix_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar, RF_ACTIVE)) == NULL) device_printf(dev, "Unable to map MSIX table\n"); } static void vtpci_modern_free_msix_resource(struct vtpci_modern_softc *sc) { device_t dev; dev = sc->vtpci_dev; if (sc->vtpci_msix_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, sc->vtpci_msix_bar, sc->vtpci_msix_res); sc->vtpci_msix_bar = 0; sc->vtpci_msix_res = NULL; } } static void vtpci_modern_probe_and_attach_child(struct vtpci_modern_softc *sc) { device_t dev, child; dev = sc->vtpci_dev; child = vtpci_child_device(&sc->vtpci_common); if (child == NULL || device_get_state(child) != DS_NOTPRESENT) return; if (device_probe(child) != 0) return; vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER); if (device_attach(child) != 0) { vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_FAILED); /* Reset state for later attempt. 
*/ vtpci_modern_child_detached(dev, child); } else { vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_DRIVER_OK); VIRTIO_ATTACH_COMPLETED(child); } } static int vtpci_modern_register_msix(struct vtpci_modern_softc *sc, int offset, struct vtpci_interrupt *intr) { uint16_t vector; if (intr != NULL) { /* Map from guest rid to host vector. */ vector = intr->vti_rid - 1; } else vector = VIRTIO_MSI_NO_VECTOR; vtpci_modern_write_common_2(sc, offset, vector); return (vtpci_modern_read_common_2(sc, offset) == vector ? 0 : ENODEV); } static int vtpci_modern_register_cfg_msix(device_t dev, struct vtpci_interrupt *intr) { struct vtpci_modern_softc *sc; int error; sc = device_get_softc(dev); error = vtpci_modern_register_msix(sc, VIRTIO_PCI_COMMON_MSIX, intr); if (error) { device_printf(dev, "unable to register config MSIX interrupt\n"); return (error); } return (0); } static int vtpci_modern_register_vq_msix(device_t dev, int idx, struct vtpci_interrupt *intr) { struct vtpci_modern_softc *sc; int error; sc = device_get_softc(dev); vtpci_modern_select_virtqueue(sc, idx); error = vtpci_modern_register_msix(sc, VIRTIO_PCI_COMMON_Q_MSIX, intr); if (error) { device_printf(dev, "unable to register virtqueue MSIX interrupt\n"); return (error); } return (0); } static void vtpci_modern_reset(struct vtpci_modern_softc *sc) { /* * Setting the status to RESET sets the host device to the * original, uninitialized state. Must poll the status until * the reset is complete. */ vtpci_modern_set_status(sc, VIRTIO_CONFIG_STATUS_RESET); while (vtpci_modern_get_status(sc) != VIRTIO_CONFIG_STATUS_RESET) cpu_spinwait(); } static void vtpci_modern_select_virtqueue(struct vtpci_modern_softc *sc, int idx) { vtpci_modern_write_common_2(sc, VIRTIO_PCI_COMMON_Q_SELECT, idx); } static uint8_t vtpci_modern_read_isr(device_t dev) { return (vtpci_modern_read_isr_1(device_get_softc(dev), 0)); } static uint16_t vtpci_modern_get_vq_size(device_t dev, int idx) { struct vtpci_modern_softc *sc; sc = device_get_softc(dev); vtpci_modern_select_virtqueue(sc, idx); return (vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_Q_SIZE)); } static bus_size_t vtpci_modern_get_vq_notify_off(device_t dev, int idx) { struct vtpci_modern_softc *sc; uint16_t q_notify_off; sc = device_get_softc(dev); vtpci_modern_select_virtqueue(sc, idx); q_notify_off = vtpci_modern_read_common_2(sc, VIRTIO_PCI_COMMON_Q_NOFF); return (q_notify_off * sc->vtpci_notify_offset_multiplier); } static void vtpci_modern_set_vq(device_t dev, struct virtqueue *vq) { struct vtpci_modern_softc *sc; sc = device_get_softc(dev); vtpci_modern_select_virtqueue(sc, virtqueue_index(vq)); /* BMV: Currently we never adjust the device's proposed VQ size. 
*/ vtpci_modern_write_common_2(sc, VIRTIO_PCI_COMMON_Q_SIZE, virtqueue_size(vq)); vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_DESCLO, virtqueue_desc_paddr(vq)); vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_AVAILLO, virtqueue_avail_paddr(vq)); vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_USEDLO, virtqueue_used_paddr(vq)); } static void vtpci_modern_disable_vq(device_t dev, int idx) { struct vtpci_modern_softc *sc; sc = device_get_softc(dev); vtpci_modern_select_virtqueue(sc, idx); vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_DESCLO, 0ULL); vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_AVAILLO, 0ULL); vtpci_modern_write_common_8(sc, VIRTIO_PCI_COMMON_Q_USEDLO, 0ULL); } static void vtpci_modern_enable_virtqueues(struct vtpci_modern_softc *sc) { int idx; for (idx = 0; idx < sc->vtpci_common.vtpci_nvqs; idx++) { vtpci_modern_select_virtqueue(sc, idx); vtpci_modern_write_common_2(sc, VIRTIO_PCI_COMMON_Q_ENABLE, 1); } } static uint8_t vtpci_modern_read_common_1(struct vtpci_modern_softc *sc, bus_size_t off) { return (bus_read_1(&sc->vtpci_common_res_map.vtrm_map, off)); } static uint16_t vtpci_modern_read_common_2(struct vtpci_modern_softc *sc, bus_size_t off) { return virtio_htog16(true, bus_read_2(&sc->vtpci_common_res_map.vtrm_map, off)); } static uint32_t vtpci_modern_read_common_4(struct vtpci_modern_softc *sc, bus_size_t off) { return virtio_htog32(true, bus_read_4(&sc->vtpci_common_res_map.vtrm_map, off)); } static void vtpci_modern_write_common_1(struct vtpci_modern_softc *sc, bus_size_t off, uint8_t val) { bus_write_1(&sc->vtpci_common_res_map.vtrm_map, off, val); } static void vtpci_modern_write_common_2(struct vtpci_modern_softc *sc, bus_size_t off, uint16_t val) { bus_write_2(&sc->vtpci_common_res_map.vtrm_map, off, virtio_gtoh16(true, val)); } static void vtpci_modern_write_common_4(struct vtpci_modern_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(&sc->vtpci_common_res_map.vtrm_map, off, virtio_gtoh32(true, val)); } static void vtpci_modern_write_common_8(struct vtpci_modern_softc *sc, bus_size_t off, uint64_t val) { uint32_t val0, val1; val0 = (uint32_t) val; val1 = val >> 32; vtpci_modern_write_common_4(sc, off, val0); vtpci_modern_write_common_4(sc, off + 4, val1); } static void vtpci_modern_write_notify_2(struct vtpci_modern_softc *sc, bus_size_t off, uint16_t val) { bus_write_2(&sc->vtpci_notify_res_map.vtrm_map, off, val); } static uint8_t vtpci_modern_read_isr_1(struct vtpci_modern_softc *sc, bus_size_t off) { return (bus_read_1(&sc->vtpci_isr_res_map.vtrm_map, off)); } static uint8_t vtpci_modern_read_device_1(struct vtpci_modern_softc *sc, bus_size_t off) { return (bus_read_1(&sc->vtpci_device_res_map.vtrm_map, off)); } static uint16_t vtpci_modern_read_device_2(struct vtpci_modern_softc *sc, bus_size_t off) { return (bus_read_2(&sc->vtpci_device_res_map.vtrm_map, off)); } static uint32_t vtpci_modern_read_device_4(struct vtpci_modern_softc *sc, bus_size_t off) { return (bus_read_4(&sc->vtpci_device_res_map.vtrm_map, off)); } static uint64_t vtpci_modern_read_device_8(struct vtpci_modern_softc *sc, bus_size_t off) { device_t dev; int gen; uint32_t val0, val1; dev = sc->vtpci_dev; /* * Treat the 64-bit field as two 32-bit fields. Use the generation * to ensure a consistent read. 
*/ do { gen = vtpci_modern_config_generation(dev); val0 = vtpci_modern_read_device_4(sc, off); val1 = vtpci_modern_read_device_4(sc, off + 4); } while (gen != vtpci_modern_config_generation(dev)); return (((uint64_t) val1 << 32) | val0); } static void vtpci_modern_write_device_1(struct vtpci_modern_softc *sc, bus_size_t off, uint8_t val) { bus_write_1(&sc->vtpci_device_res_map.vtrm_map, off, val); } static void vtpci_modern_write_device_2(struct vtpci_modern_softc *sc, bus_size_t off, uint16_t val) { bus_write_2(&sc->vtpci_device_res_map.vtrm_map, off, val); } static void vtpci_modern_write_device_4(struct vtpci_modern_softc *sc, bus_size_t off, uint32_t val) { bus_write_4(&sc->vtpci_device_res_map.vtrm_map, off, val); } static void vtpci_modern_write_device_8(struct vtpci_modern_softc *sc, bus_size_t off, uint64_t val) { uint32_t val0, val1; val0 = (uint32_t) val; val1 = val >> 32; vtpci_modern_write_device_4(sc, off, val0); vtpci_modern_write_device_4(sc, off + 4, val1); } diff --git a/sys/dev/virtio/virtio.c b/sys/dev/virtio/virtio.c index 53b47004610e..673deee576f7 100644 --- a/sys/dev/virtio/virtio.c +++ b/sys/dev/virtio/virtio.c @@ -1,382 +1,381 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_bus_if.h" static int virtio_modevent(module_t, int, void *); static const char *virtio_feature_name(uint64_t, struct virtio_feature_desc *); static struct virtio_ident { uint16_t devid; const char *name; } virtio_ident_table[] = { { VIRTIO_ID_NETWORK, "Network" }, { VIRTIO_ID_BLOCK, "Block" }, { VIRTIO_ID_CONSOLE, "Console" }, { VIRTIO_ID_ENTROPY, "Entropy" }, { VIRTIO_ID_BALLOON, "Balloon" }, { VIRTIO_ID_IOMEMORY, "IOMemory" }, { VIRTIO_ID_RPMSG, "Remote Processor Messaging" }, { VIRTIO_ID_SCSI, "SCSI" }, { VIRTIO_ID_9P, "9P Transport" }, { VIRTIO_ID_RPROC_SERIAL, "Remote Processor Serial" }, { VIRTIO_ID_CAIF, "CAIF" }, { VIRTIO_ID_GPU, "GPU" }, { VIRTIO_ID_INPUT, "Input" }, { VIRTIO_ID_VSOCK, "VSOCK Transport" }, { VIRTIO_ID_CRYPTO, "Crypto" }, { 0, NULL } }; /* Device independent features. 
*/ static struct virtio_feature_desc virtio_common_feature_desc[] = { { VIRTIO_F_NOTIFY_ON_EMPTY, "NotifyOnEmpty" }, /* Legacy */ { VIRTIO_F_ANY_LAYOUT, "AnyLayout" }, /* Legacy */ { VIRTIO_RING_F_INDIRECT_DESC, "RingIndirectDesc" }, { VIRTIO_RING_F_EVENT_IDX, "RingEventIdx" }, { VIRTIO_F_BAD_FEATURE, "BadFeature" }, /* Legacy */ { VIRTIO_F_VERSION_1, "Version1" }, { VIRTIO_F_IOMMU_PLATFORM, "IOMMUPlatform" }, { 0, NULL } }; const char * virtio_device_name(uint16_t devid) { struct virtio_ident *ident; for (ident = virtio_ident_table; ident->name != NULL; ident++) { if (ident->devid == devid) return (ident->name); } return (NULL); } static const char * virtio_feature_name(uint64_t val, struct virtio_feature_desc *desc) { int i, j; struct virtio_feature_desc *descs[2] = { desc, virtio_common_feature_desc }; for (i = 0; i < 2; i++) { if (descs[i] == NULL) continue; for (j = 0; descs[i][j].vfd_val != 0; j++) { if (val == descs[i][j].vfd_val) return (descs[i][j].vfd_str); } } return (NULL); } int virtio_describe_sbuf(struct sbuf *sb, uint64_t features, struct virtio_feature_desc *desc) { const char *name; uint64_t val; int n; sbuf_printf(sb, "%#jx", (uintmax_t) features); for (n = 0, val = 1ULL << 63; val != 0; val >>= 1) { /* * BAD_FEATURE is used to detect broken Linux clients * and therefore is not applicable to FreeBSD. */ if (((features & val) == 0) || val == VIRTIO_F_BAD_FEATURE) continue; if (n++ == 0) sbuf_cat(sb, " <"); else sbuf_cat(sb, ","); name = virtio_feature_name(val, desc); if (name == NULL) sbuf_printf(sb, "%#jx", (uintmax_t) val); else sbuf_cat(sb, name); } if (n > 0) sbuf_cat(sb, ">"); return (sbuf_finish(sb)); } void virtio_describe(device_t dev, const char *msg, uint64_t features, struct virtio_feature_desc *desc) { struct sbuf sb; char *buf; int error; if ((buf = malloc(1024, M_TEMP, M_NOWAIT)) == NULL) { error = ENOMEM; goto out; } sbuf_new(&sb, buf, 1024, SBUF_FIXEDLEN); sbuf_printf(&sb, "%s features: ", msg); error = virtio_describe_sbuf(&sb, features, desc); if (error == 0) device_printf(dev, "%s\n", sbuf_data(&sb)); sbuf_delete(&sb); free(buf, M_TEMP); out: if (error != 0) { device_printf(dev, "%s features: %#jx\n", msg, (uintmax_t) features); } } uint64_t virtio_filter_transport_features(uint64_t features) { uint64_t transport, mask; transport = (1ULL << (VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START)) - 1; transport <<= VIRTIO_TRANSPORT_F_START; mask = -1ULL & ~transport; mask |= VIRTIO_RING_F_INDIRECT_DESC; mask |= VIRTIO_RING_F_EVENT_IDX; mask |= VIRTIO_F_VERSION_1; return (features & mask); } int virtio_bus_is_modern(device_t dev) { uintptr_t modern; virtio_read_ivar(dev, VIRTIO_IVAR_MODERN, &modern); return (modern != 0); } void virtio_read_device_config_array(device_t dev, bus_size_t offset, void *dst, int size, int count) { int i, gen; do { gen = virtio_config_generation(dev); for (i = 0; i < count; i++) { virtio_read_device_config(dev, offset + i * size, (uint8_t *) dst + i * size, size); } } while (gen != virtio_config_generation(dev)); } /* * VirtIO bus method wrappers. 
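/*
 * Example of what virtio_describe_sbuf() renders (values assumed for
 * illustration): for features = VIRTIO_F_VERSION_1 (bit 32) |
 * VIRTIO_RING_F_INDIRECT_DESC (bit 28), i.e. 0x110000000, a driver
 * calling virtio_describe(dev, "host", features, NULL) would print
 * roughly:
 *
 *	vtnet0: host features: 0x110000000 <Version1,RingIndirectDesc>
 *
 * Bits without a name in either descriptor table are printed back in
 * hex inside the angle brackets.
 */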
*/ void virtio_read_ivar(device_t dev, int ivar, uintptr_t *val) { *val = -1; BUS_READ_IVAR(device_get_parent(dev), dev, ivar, val); } void virtio_write_ivar(device_t dev, int ivar, uintptr_t val) { BUS_WRITE_IVAR(device_get_parent(dev), dev, ivar, val); } uint64_t virtio_negotiate_features(device_t dev, uint64_t child_features) { return (VIRTIO_BUS_NEGOTIATE_FEATURES(device_get_parent(dev), child_features)); } int virtio_finalize_features(device_t dev) { return (VIRTIO_BUS_FINALIZE_FEATURES(device_get_parent(dev))); } int virtio_alloc_virtqueues(device_t dev, int flags, int nvqs, struct vq_alloc_info *info) { return (VIRTIO_BUS_ALLOC_VIRTQUEUES(device_get_parent(dev), flags, nvqs, info)); } int virtio_setup_intr(device_t dev, enum intr_type type) { return (VIRTIO_BUS_SETUP_INTR(device_get_parent(dev), type)); } int virtio_with_feature(device_t dev, uint64_t feature) { return (VIRTIO_BUS_WITH_FEATURE(device_get_parent(dev), feature)); } void virtio_stop(device_t dev) { VIRTIO_BUS_STOP(device_get_parent(dev)); } int virtio_reinit(device_t dev, uint64_t features) { return (VIRTIO_BUS_REINIT(device_get_parent(dev), features)); } void virtio_reinit_complete(device_t dev) { VIRTIO_BUS_REINIT_COMPLETE(device_get_parent(dev)); } int virtio_config_generation(device_t dev) { return (VIRTIO_BUS_CONFIG_GENERATION(device_get_parent(dev))); } void virtio_read_device_config(device_t dev, bus_size_t offset, void *dst, int len) { VIRTIO_BUS_READ_DEVICE_CONFIG(device_get_parent(dev), offset, dst, len); } void virtio_write_device_config(device_t dev, bus_size_t offset, void *dst, int len) { VIRTIO_BUS_WRITE_DEVICE_CONFIG(device_get_parent(dev), offset, dst, len); } int -virtio_child_pnpinfo_str(device_t busdev __unused, device_t child, char *buf, - size_t buflen) +virtio_child_pnpinfo(device_t busdev __unused, device_t child, struct sbuf *sb) { /* * All of these PCI fields will be only 16 bits, but on the vtmmio bus * the corresponding fields (only "vendor" and "device_type") are 32 * bits. Many virtio drivers can attach below either bus. * Gratuitously expand these two fields to 32-bits to allow sharing PNP * match table data between the mostly-similar buses. * * Subdevice and device_type are redundant in both buses, so I don't * see a lot of PNP utility in exposing the same value under a * different name. */ - snprintf(buf, buflen, "vendor=0x%08x device=0x%04x subvendor=0x%04x " + sbuf_printf(sb, "vendor=0x%08x device=0x%04x subvendor=0x%04x " "device_type=0x%08x", (unsigned)virtio_get_vendor(child), (unsigned)virtio_get_device(child), (unsigned)virtio_get_subvendor(child), (unsigned)virtio_get_device_type(child)); return (0); } static int virtio_modevent(module_t mod, int type, void *unused) { int error; switch (type) { case MOD_LOAD: case MOD_QUIESCE: case MOD_UNLOAD: case MOD_SHUTDOWN: error = 0; break; default: error = EOPNOTSUPP; break; } return (error); } static moduledata_t virtio_mod = { "virtio", virtio_modevent, 0 }; DECLARE_MODULE(virtio, virtio_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); MODULE_VERSION(virtio, 1); diff --git a/sys/dev/virtio/virtio.h b/sys/dev/virtio/virtio.h index 8d32d5a8742f..3b2105de7175 100644 --- a/sys/dev/virtio/virtio.h +++ b/sys/dev/virtio/virtio.h @@ -1,194 +1,193 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2014, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _VIRTIO_H_ #define _VIRTIO_H_ #include #include #include #ifdef _KERNEL struct sbuf; struct vq_alloc_info; /* * Each virtqueue indirect descriptor list must be physically contiguous. * To allow us to malloc(9) each list individually, limit the number * supported to what will fit in one page. With 4KB pages, this is a limit * of 256 descriptors. If there is ever a need for more, we can switch to * contigmalloc(9) for the larger allocations, similar to what * bus_dmamem_alloc(9) does. * * Note the sizeof(struct vring_desc) is 16 bytes. */ #define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16)) /* * VirtIO instance variables indices. */ #define VIRTIO_IVAR_DEVTYPE 1 #define VIRTIO_IVAR_FEATURE_DESC 2 #define VIRTIO_IVAR_VENDOR 3 #define VIRTIO_IVAR_DEVICE 4 #define VIRTIO_IVAR_SUBVENDOR 5 #define VIRTIO_IVAR_SUBDEVICE 6 #define VIRTIO_IVAR_MODERN 7 struct virtio_feature_desc { uint64_t vfd_val; const char *vfd_str; }; #define VIRTIO_DRIVER_MODULE(name, driver, devclass, evh, arg) \ DRIVER_MODULE(name, virtio_mmio, driver, devclass, evh, arg); \ DRIVER_MODULE(name, virtio_pci, driver, devclass, evh, arg) struct virtio_pnp_match { uint32_t device_type; const char *description; }; #define VIRTIO_SIMPLE_PNPINFO(driver, devtype, desc) \ static const struct virtio_pnp_match driver ## _match = { \ .device_type = devtype, \ .description = desc, \ }; \ MODULE_PNP_INFO("U32:device_type;D:#", virtio_mmio, driver, \ &driver ## _match, 1); \ MODULE_PNP_INFO("U32:device_type;D:#", virtio_pci, driver, \ &driver ## _match, 1) #define VIRTIO_SIMPLE_PROBE(dev, driver) \ (virtio_simple_probe(dev, &driver ## _match)) const char *virtio_device_name(uint16_t devid); void virtio_describe(device_t dev, const char *msg, uint64_t features, struct virtio_feature_desc *desc); int virtio_describe_sbuf(struct sbuf *sb, uint64_t features, struct virtio_feature_desc *desc); uint64_t virtio_filter_transport_features(uint64_t features); int virtio_bus_is_modern(device_t dev); void virtio_read_device_config_array(device_t dev, bus_size_t offset, void *dst, int size, int count); /* * VirtIO Bus Methods. 
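/*
 * Usage sketch for the PNP glue above (hypothetical driver name): a
 * driver declares its match table with
 *
 *	VIRTIO_SIMPLE_PNPINFO(virtio_foo, VIRTIO_ID_ENTROPY,
 *	    "VirtIO Entropy Adapter");
 *
 * and probes with VIRTIO_SIMPLE_PROBE(dev, virtio_foo). The
 * "U32:device_type;D:#" descriptor keys off the device_type field of
 * the pnpinfo string that virtio_child_pnpinfo() now emits via sbuf;
 * for a modern virtio-net child one would expect something like
 *
 *	vendor=0x00001af4 device=0x1041 subvendor=0x1af4 device_type=0x00000001
 *
 * which devmatch(8) can match against to load the right module.
 */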
*/ void virtio_read_ivar(device_t dev, int ivar, uintptr_t *val); void virtio_write_ivar(device_t dev, int ivar, uintptr_t val); uint64_t virtio_negotiate_features(device_t dev, uint64_t child_features); int virtio_finalize_features(device_t dev); int virtio_alloc_virtqueues(device_t dev, int flags, int nvqs, struct vq_alloc_info *info); int virtio_setup_intr(device_t dev, enum intr_type type); int virtio_with_feature(device_t dev, uint64_t feature); void virtio_stop(device_t dev); int virtio_config_generation(device_t dev); int virtio_reinit(device_t dev, uint64_t features); void virtio_reinit_complete(device_t dev); -int virtio_child_pnpinfo_str(device_t busdev, device_t child, char *buf, - size_t buflen); +int virtio_child_pnpinfo(device_t busdev, device_t child, struct sbuf *sb); /* * Read/write a variable amount from the device specific (ie, network) * configuration region. This region is encoded in the same endian as * the guest. */ void virtio_read_device_config(device_t dev, bus_size_t offset, void *dst, int length); void virtio_write_device_config(device_t dev, bus_size_t offset, void *src, int length); /* Inlined device specific read/write functions for common lengths. */ #define VIRTIO_RDWR_DEVICE_CONFIG(size, type) \ static inline type \ __CONCAT(virtio_read_dev_config_,size)(device_t dev, \ bus_size_t offset) \ { \ type val; \ virtio_read_device_config(dev, offset, &val, sizeof(type)); \ return (val); \ } \ \ static inline void \ __CONCAT(virtio_write_dev_config_,size)(device_t dev, \ bus_size_t offset, type val) \ { \ virtio_write_device_config(dev, offset, &val, sizeof(type)); \ } VIRTIO_RDWR_DEVICE_CONFIG(1, uint8_t); VIRTIO_RDWR_DEVICE_CONFIG(2, uint16_t); VIRTIO_RDWR_DEVICE_CONFIG(4, uint32_t); #undef VIRTIO_RDWR_DEVICE_CONFIG #define VIRTIO_READ_IVAR(name, ivar) \ static inline int \ __CONCAT(virtio_get_,name)(device_t dev) \ { \ uintptr_t val; \ virtio_read_ivar(dev, ivar, &val); \ return ((int) val); \ } VIRTIO_READ_IVAR(device_type, VIRTIO_IVAR_DEVTYPE); VIRTIO_READ_IVAR(vendor, VIRTIO_IVAR_VENDOR); VIRTIO_READ_IVAR(device, VIRTIO_IVAR_DEVICE); VIRTIO_READ_IVAR(subvendor, VIRTIO_IVAR_SUBVENDOR); VIRTIO_READ_IVAR(subdevice, VIRTIO_IVAR_SUBDEVICE); VIRTIO_READ_IVAR(modern, VIRTIO_IVAR_MODERN); #undef VIRTIO_READ_IVAR #define VIRTIO_WRITE_IVAR(name, ivar) \ static inline void \ __CONCAT(virtio_set_,name)(device_t dev, void *val) \ { \ virtio_write_ivar(dev, ivar, (uintptr_t) val); \ } VIRTIO_WRITE_IVAR(feature_desc, VIRTIO_IVAR_FEATURE_DESC); #undef VIRTIO_WRITE_IVAR static inline int virtio_simple_probe(device_t dev, const struct virtio_pnp_match *match) { if (virtio_get_device_type(dev) != match->device_type) return (ENXIO); device_set_desc(dev, match->description); return (BUS_PROBE_DEFAULT); } #endif /* _KERNEL */ #endif /* _VIRTIO_H_ */ diff --git a/sys/isa/isa_common.c b/sys/isa/isa_common.c index 7bc66d552e31..3255e76308c0 100644 --- a/sys/isa/isa_common.c +++ b/sys/isa/isa_common.c @@ -1,1150 +1,1147 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND MIT * * Copyright (c) 1999 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Modifications for Intel architecture by Garrett A. Wollman. * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Parts of the ISA bus implementation common to all architectures. */ #include __FBSDID("$FreeBSD$"); #include "opt_isa.h" #include #include #include #include #include #include #include #include #include +#include #include #include #include #include static int isa_print_child(device_t bus, device_t dev); static MALLOC_DEFINE(M_ISADEV, "isadev", "ISA device"); static int isa_running; /* * At 'probe' time, we add all the devices which we know about to the * bus. The generic attach routine will probe and attach them if they * are alive. */ static int isa_probe(device_t dev) { device_set_desc(dev, "ISA bus"); isa_init(dev); /* Allow machdep code to initialise */ return (0); } extern device_t isa_bus_device; static int isa_attach(device_t dev) { /* * Arrange for isa_probe_children(dev) to be called later. XXX */ isa_bus_device = dev; return (0); } /* * Find a working set of memory regions for a child using the ranges * in *config and return the regions in *result. Returns non-zero if * a set of ranges was found. 
*/ static int isa_find_memory(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NMEM]; /* * First clear out any existing resource definitions. */ for (i = 0; i < ISA_NMEM; i++) { bus_delete_resource(child, SYS_RES_MEMORY, i); res[i] = NULL; } success = 1; result->ic_nmem = config->ic_nmem; for (i = 0; i < config->ic_nmem; i++) { uint32_t start, end, size, align; size = config->ic_mem[i].ir_size; /* the PnP device may have a null resource as filler */ if (size == 0) { result->ic_mem[i].ir_start = 0; result->ic_mem[i].ir_end = 0; result->ic_mem[i].ir_size = 0; result->ic_mem[i].ir_align = 0; continue; } for (start = config->ic_mem[i].ir_start, end = config->ic_mem[i].ir_end, align = config->ic_mem[i].ir_align; start + size - 1 <= end && start + size > start; start += MAX(align, 1)) { bus_set_resource(child, SYS_RES_MEMORY, i, start, size); res[i] = bus_alloc_resource_any(child, SYS_RES_MEMORY, &i, rman_make_alignment_flags(align) /* !RF_ACTIVE */); if (res[i]) { result->ic_mem[i].ir_start = start; result->ic_mem[i].ir_end = start + size - 1; result->ic_mem[i].ir_size = size; result->ic_mem[i].ir_align = align; break; } } /* * If we didn't find a place for memory range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NMEM; i++) { if (res[i]) bus_release_resource(child, SYS_RES_MEMORY, i, res[i]); } return (success); } /* * Find a working set of port regions for a child using the ranges * in *config and return the regions in *result. Returns non-zero if * a set of ranges was found. */ static int isa_find_port(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NPORT]; /* * First clear out any existing resource definitions. */ for (i = 0; i < ISA_NPORT; i++) { bus_delete_resource(child, SYS_RES_IOPORT, i); res[i] = NULL; } success = 1; result->ic_nport = config->ic_nport; for (i = 0; i < config->ic_nport; i++) { uint32_t start, end, size, align; size = config->ic_port[i].ir_size; /* the PnP device may have a null resource as filler */ if (size == 0) { result->ic_port[i].ir_start = 0; result->ic_port[i].ir_end = 0; result->ic_port[i].ir_size = 0; result->ic_port[i].ir_align = 0; continue; } for (start = config->ic_port[i].ir_start, end = config->ic_port[i].ir_end, align = config->ic_port[i].ir_align; start + size - 1 <= end; start += align) { bus_set_resource(child, SYS_RES_IOPORT, i, start, size); res[i] = bus_alloc_resource_any(child, SYS_RES_IOPORT, &i, rman_make_alignment_flags(align) /* !RF_ACTIVE */); if (res[i]) { result->ic_port[i].ir_start = start; result->ic_port[i].ir_end = start + size - 1; result->ic_port[i].ir_size = size; result->ic_port[i].ir_align = align; break; } } /* * If we didn't find a place for port range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NPORT; i++) { if (res[i]) bus_release_resource(child, SYS_RES_IOPORT, i, res[i]); } return success; } /* * Return the index of the first bit in the mask (or -1 if mask is empty. */ static int find_first_bit(uint32_t mask) { return (ffs(mask) - 1); } /* * Return the index of the next bit in the mask, or -1 if there are no more. */ static int find_next_bit(uint32_t mask, int bit) { bit++; while (bit < 32 && !(mask & (1 << bit))) bit++; if (bit != 32) return (bit); return (-1); } /* * Find a working set of irqs for a child using the masks in *config * and return the regions in *result. Returns non-zero if a set of * irqs was found. 
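/*
 * Worked example for the bit helpers above: for an IRQ mask of 0x28
 * (IRQs 3 and 5), find_first_bit(0x28) returns 3 (ffs() is 1-based,
 * hence the -1), find_next_bit(0x28, 3) returns 5, and
 * find_next_bit(0x28, 5) returns -1, ending the scan.
 */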
*/ static int isa_find_irq(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NIRQ]; /* * First clear out any existing resource definitions. */ for (i = 0; i < ISA_NIRQ; i++) { bus_delete_resource(child, SYS_RES_IRQ, i); res[i] = NULL; } success = 1; result->ic_nirq = config->ic_nirq; for (i = 0; i < config->ic_nirq; i++) { uint32_t mask = config->ic_irqmask[i]; int irq; /* the PnP device may have a null resource as filler */ if (mask == 0) { result->ic_irqmask[i] = 0; continue; } for (irq = find_first_bit(mask); irq != -1; irq = find_next_bit(mask, irq)) { bus_set_resource(child, SYS_RES_IRQ, i, irq, 1); res[i] = bus_alloc_resource_any(child, SYS_RES_IRQ, &i, 0 /* !RF_ACTIVE */ ); if (res[i]) { result->ic_irqmask[i] = (1 << irq); break; } } /* * If we didn't find a place for irq range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NIRQ; i++) { if (res[i]) bus_release_resource(child, SYS_RES_IRQ, i, res[i]); } return (success); } /* * Find a working set of drqs for a child using the masks in *config * and return the regions in *result. Returns non-zero if a set of * drqs was found. */ static int isa_find_drq(device_t child, struct isa_config *config, struct isa_config *result) { int success, i; struct resource *res[ISA_NDRQ]; /* * First clear out any existing resource definitions. */ for (i = 0; i < ISA_NDRQ; i++) { bus_delete_resource(child, SYS_RES_DRQ, i); res[i] = NULL; } success = 1; result->ic_ndrq = config->ic_ndrq; for (i = 0; i < config->ic_ndrq; i++) { uint32_t mask = config->ic_drqmask[i]; int drq; /* the PnP device may have a null resource as filler */ if (mask == 0) { result->ic_drqmask[i] = 0; continue; } for (drq = find_first_bit(mask); drq != -1; drq = find_next_bit(mask, drq)) { bus_set_resource(child, SYS_RES_DRQ, i, drq, 1); res[i] = bus_alloc_resource_any(child, SYS_RES_DRQ, &i, 0 /* !RF_ACTIVE */); if (res[i]) { result->ic_drqmask[i] = (1 << drq); break; } } /* * If we didn't find a place for drq range i, then * give up now. */ if (!res[i]) { success = 0; break; } } for (i = 0; i < ISA_NDRQ; i++) { if (res[i]) bus_release_resource(child, SYS_RES_DRQ, i, res[i]); } return (success); } /* * Attempt to find a working set of resources for a device. Return * non-zero if a working configuration is found. */ static int isa_assign_resources(device_t child) { struct isa_device *idev = DEVTOISA(child); struct isa_config_entry *ice; struct isa_config *cfg; const char *reason; reason = "Empty ISA id_configs"; cfg = malloc(sizeof(struct isa_config), M_TEMP, M_NOWAIT|M_ZERO); if (cfg == NULL) return(0); TAILQ_FOREACH(ice, &idev->id_configs, ice_link) { reason = "memory"; if (!isa_find_memory(child, &ice->ice_config, cfg)) continue; reason = "port"; if (!isa_find_port(child, &ice->ice_config, cfg)) continue; reason = "irq"; if (!isa_find_irq(child, &ice->ice_config, cfg)) continue; reason = "drq"; if (!isa_find_drq(child, &ice->ice_config, cfg)) continue; /* * A working configuration was found enable the device * with this configuration. */ reason = "no callback"; if (idev->id_config_cb) { idev->id_config_cb(idev->id_config_arg, cfg, 1); free(cfg, M_TEMP); return (1); } } /* * Disable the device. 
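/*
 * The enable/disable protocol used above: isa_assign_resources()
 * invokes the device's id_config_cb with the chosen config and
 * enable=1, or with a zeroed config and enable=0 to turn the card
 * off. A minimal sketch of such a callback (hypothetical driver,
 * assuming the isa_config_cb signature from isavar.h):
 */
static void
foo_isa_config_cb(void *arg, struct isa_config *config, int enable)
{
	/* enable != 0: program the card's decoders from *config. */
	/* enable == 0: stop the card from decoding any resources. */
}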
*/ bus_print_child_header(device_get_parent(child), child); printf(" can't assign resources (%s)\n", reason); if (bootverbose) isa_print_child(device_get_parent(child), child); bzero(cfg, sizeof (*cfg)); if (idev->id_config_cb) idev->id_config_cb(idev->id_config_arg, cfg, 0); device_disable(child); free(cfg, M_TEMP); return (0); } /* * Claim any unallocated resources to keep other devices from using * them. */ static void isa_claim_resources(device_t dev, device_t child) { struct isa_device *idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; struct resource_list_entry *rle; int rid; STAILQ_FOREACH(rle, rl, link) { if (!rle->res) { rid = rle->rid; resource_list_alloc(rl, dev, child, rle->type, &rid, 0, ~0, 1, 0); } } } /* * Called after other devices have initialised to probe for isa devices. */ void isa_probe_children(device_t dev) { struct isa_device *idev; device_t *children, child; struct isa_config *cfg; int nchildren, i, err; /* * Create all the non-hinted children by calling drivers' * identify methods. */ bus_generic_probe(dev); if (device_get_children(dev, &children, &nchildren)) return; /* * First disable all pnp devices so that they don't get * matched by legacy probes. */ if (bootverbose) printf("isa_probe_children: disabling PnP devices\n"); cfg = malloc(sizeof(*cfg), M_TEMP, M_NOWAIT|M_ZERO); if (cfg == NULL) { free(children, M_TEMP); return; } for (i = 0; i < nchildren; i++) { idev = DEVTOISA(children[i]); bzero(cfg, sizeof(*cfg)); if (idev->id_config_cb) idev->id_config_cb(idev->id_config_arg, cfg, 0); } free(cfg, M_TEMP); /* * Next, probe all the PnP BIOS devices so they can subsume any * hints. */ for (i = 0; i < nchildren; i++) { child = children[i]; idev = DEVTOISA(child); if (idev->id_order > ISA_ORDER_PNPBIOS) continue; if (!TAILQ_EMPTY(&idev->id_configs) && !isa_assign_resources(child)) continue; if (device_probe_and_attach(child) == 0) isa_claim_resources(dev, child); } free(children, M_TEMP); /* * Next, enumerate hinted devices and probe all non-pnp devices so * that they claim their resources first. */ bus_enumerate_hinted_children(dev); if (device_get_children(dev, &children, &nchildren)) return; if (bootverbose) printf("isa_probe_children: probing non-PnP devices\n"); for (i = 0; i < nchildren; i++) { child = children[i]; idev = DEVTOISA(child); if (device_is_attached(child) || !TAILQ_EMPTY(&idev->id_configs)) continue; err = device_probe_and_attach(child); if (err == 0 && idev->id_vendorid == 0 && strcmp(kern_ident, "GENERIC") == 0 && device_is_attached(child)) device_printf(child, "non-PNP ISA device will be removed from GENERIC in FreeBSD 14.\n"); } /* * Finally assign resource to pnp devices and probe them. */ if (bootverbose) printf("isa_probe_children: probing PnP devices\n"); for (i = 0; i < nchildren; i++) { child = children[i]; idev = DEVTOISA(child); if (device_is_attached(child) || TAILQ_EMPTY(&idev->id_configs)) continue; if (isa_assign_resources(child)) { device_probe_and_attach(child); isa_claim_resources(dev, child); } } free(children, M_TEMP); isa_running = 1; } /* * Add a new child with default ivars. 
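/*
 * To summarize the ordering in isa_probe_children() above: (1) all
 * PnP devices are first disabled so legacy probes cannot latch onto
 * them, (2) PnP BIOS devices are probed so they can subsume hints,
 * (3) hinted/non-PnP devices are probed next so they claim their
 * fixed resources first, and (4) the remaining PnP devices are
 * assigned resources around them and probed last.
 */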
*/ static device_t isa_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct isa_device *idev; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); idev = malloc(sizeof(struct isa_device), M_ISADEV, M_NOWAIT | M_ZERO); if (!idev) return (0); resource_list_init(&idev->id_resources); TAILQ_INIT(&idev->id_configs); idev->id_order = order; device_set_ivars(child, idev); return (child); } static int isa_print_all_resources(device_t dev) { struct isa_device *idev = DEVTOISA(dev); struct resource_list *rl = &idev->id_resources; int retval = 0; if (STAILQ_FIRST(rl) || device_get_flags(dev)) retval += printf(" at"); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += resource_list_print_type(rl, "drq", SYS_RES_DRQ, "%jd"); if (device_get_flags(dev)) retval += printf(" flags %#x", device_get_flags(dev)); if (idev->id_vendorid) retval += printf(" pnpid %s", pnp_eisaformat(idev->id_vendorid)); return (retval); } static int isa_print_child(device_t bus, device_t dev) { int retval = 0; retval += bus_print_child_header(bus, dev); retval += isa_print_all_resources(dev); retval += bus_print_child_footer(bus, dev); return (retval); } static void isa_probe_nomatch(device_t dev, device_t child) { if (bootverbose) { bus_print_child_header(dev, child); printf(" failed to probe"); isa_print_all_resources(child); bus_print_child_footer(dev, child); } return; } static int isa_read_ivar(device_t bus, device_t dev, int index, uintptr_t * result) { struct isa_device* idev = DEVTOISA(dev); struct resource_list *rl = &idev->id_resources; struct resource_list_entry *rle; switch (index) { case ISA_IVAR_PORT_0: rle = resource_list_find(rl, SYS_RES_IOPORT, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_PORT_1: rle = resource_list_find(rl, SYS_RES_IOPORT, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_PORTSIZE_0: rle = resource_list_find(rl, SYS_RES_IOPORT, 0); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_PORTSIZE_1: rle = resource_list_find(rl, SYS_RES_IOPORT, 1); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_MADDR_0: rle = resource_list_find(rl, SYS_RES_MEMORY, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_MADDR_1: rle = resource_list_find(rl, SYS_RES_MEMORY, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_MEMSIZE_0: rle = resource_list_find(rl, SYS_RES_MEMORY, 0); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_MEMSIZE_1: rle = resource_list_find(rl, SYS_RES_MEMORY, 1); if (rle) *result = rle->count; else *result = 0; break; case ISA_IVAR_IRQ_0: rle = resource_list_find(rl, SYS_RES_IRQ, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_IRQ_1: rle = resource_list_find(rl, SYS_RES_IRQ, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_DRQ_0: rle = resource_list_find(rl, SYS_RES_DRQ, 0); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_DRQ_1: rle = resource_list_find(rl, SYS_RES_DRQ, 1); if (rle) *result = rle->start; else *result = -1; break; case ISA_IVAR_VENDORID: *result = idev->id_vendorid; break; case ISA_IVAR_SERIAL: *result = idev->id_serial; break; case ISA_IVAR_LOGICALID: *result = idev->id_logicalid; break; case 
ISA_IVAR_COMPATID: *result = idev->id_compatid; break; case ISA_IVAR_CONFIGATTR: *result = idev->id_config_attr; break; case ISA_IVAR_PNP_CSN: *result = idev->id_pnp_csn; break; case ISA_IVAR_PNP_LDN: *result = idev->id_pnp_ldn; break; case ISA_IVAR_PNPBIOS_HANDLE: *result = idev->id_pnpbios_handle; break; default: return (ENOENT); } return (0); } static int isa_write_ivar(device_t bus, device_t dev, int index, uintptr_t value) { struct isa_device* idev = DEVTOISA(dev); switch (index) { case ISA_IVAR_PORT_0: case ISA_IVAR_PORT_1: case ISA_IVAR_PORTSIZE_0: case ISA_IVAR_PORTSIZE_1: case ISA_IVAR_MADDR_0: case ISA_IVAR_MADDR_1: case ISA_IVAR_MEMSIZE_0: case ISA_IVAR_MEMSIZE_1: case ISA_IVAR_IRQ_0: case ISA_IVAR_IRQ_1: case ISA_IVAR_DRQ_0: case ISA_IVAR_DRQ_1: return (EINVAL); case ISA_IVAR_VENDORID: idev->id_vendorid = value; break; case ISA_IVAR_SERIAL: idev->id_serial = value; break; case ISA_IVAR_LOGICALID: idev->id_logicalid = value; break; case ISA_IVAR_COMPATID: idev->id_compatid = value; break; case ISA_IVAR_CONFIGATTR: idev->id_config_attr = value; break; default: return (ENOENT); } return (0); } /* * Free any resources which the driver missed or which we were holding for * it (see isa_probe_children). */ static void isa_child_detached(device_t dev, device_t child) { struct isa_device* idev = DEVTOISA(child); if (TAILQ_FIRST(&idev->id_configs)) isa_claim_resources(dev, child); } static void isa_driver_added(device_t dev, driver_t *driver) { device_t *children; int nchildren, i; /* * Don't do anything if drivers are dynamically * added during autoconfiguration (cf. ymf724), * since that would end up calling identify * twice. */ if (!isa_running) return; DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &children, &nchildren)) return; for (i = 0; i < nchildren; i++) { device_t child = children[i]; struct isa_device *idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; struct resource_list_entry *rle; if (device_get_state(child) != DS_NOTPRESENT) continue; if (!device_is_enabled(child)) continue; /* * Free resources which we were holding on behalf of * the device.
*/ STAILQ_FOREACH(rle, &idev->id_resources, link) { if (rle->res) resource_list_release(rl, dev, child, rle->type, rle->rid, rle->res); } if (TAILQ_FIRST(&idev->id_configs)) if (!isa_assign_resources(child)) continue; device_probe_and_attach(child); if (TAILQ_FIRST(&idev->id_configs)) isa_claim_resources(dev, child); } free(children, M_TEMP); } static int isa_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct isa_device* idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; if (type != SYS_RES_IOPORT && type != SYS_RES_MEMORY && type != SYS_RES_IRQ && type != SYS_RES_DRQ) return (EINVAL); if (rid < 0) return (EINVAL); if (type == SYS_RES_IOPORT && rid >= ISA_NPORT) return (EINVAL); if (type == SYS_RES_MEMORY && rid >= ISA_NMEM) return (EINVAL); if (type == SYS_RES_IRQ && rid >= ISA_NIRQ) return (EINVAL); if (type == SYS_RES_DRQ && rid >= ISA_NDRQ) return (EINVAL); resource_list_add(rl, type, rid, start, start + count - 1, count); return (0); } static struct resource_list * isa_get_resource_list (device_t dev, device_t child) { struct isa_device* idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; if (!rl) return (NULL); return (rl); } static int isa_add_config(device_t dev, device_t child, int priority, struct isa_config *config) { struct isa_device* idev = DEVTOISA(child); struct isa_config_entry *newice, *ice; newice = malloc(sizeof *ice, M_DEVBUF, M_NOWAIT); if (!newice) return (ENOMEM); newice->ice_priority = priority; newice->ice_config = *config; TAILQ_FOREACH(ice, &idev->id_configs, ice_link) { if (ice->ice_priority > priority) break; } if (ice) TAILQ_INSERT_BEFORE(ice, newice, ice_link); else TAILQ_INSERT_TAIL(&idev->id_configs, newice, ice_link); return (0); } static void isa_set_config_callback(device_t dev, device_t child, isa_config_cb *fn, void *arg) { struct isa_device* idev = DEVTOISA(child); idev->id_config_cb = fn; idev->id_config_arg = arg; } static int isa_pnp_probe(device_t dev, device_t child, struct isa_pnp_id *ids) { struct isa_device* idev = DEVTOISA(child); if (!idev->id_vendorid) return (ENOENT); while (ids && ids->ip_id) { /* * Really ought to support >1 compat id per device. 
*/ if (idev->id_logicalid == ids->ip_id || idev->id_compatid == ids->ip_id) { if (ids->ip_desc) device_set_desc(child, ids->ip_desc); return (0); } ids++; } return (ENXIO); } static int -isa_child_pnpinfo_str(device_t bus, device_t child, char *buf, - size_t buflen) +isa_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb) { struct isa_device *idev = DEVTOISA(child); if (idev->id_vendorid) - snprintf(buf, buflen, "pnpid=%s", + sbuf_printf(sb, "pnpid=%s", pnp_eisaformat(idev->id_vendorid)); return (0); } static int -isa_child_location_str(device_t bus, device_t child, char *buf, - size_t buflen) +isa_child_location(device_t bus, device_t child, struct sbuf *sb) { #if 0 /* id_pnphandle isn't there yet */ struct isa_device *idev = DEVTOISA(child); if (idev->id_vendorid) - snprintf(buf, buflen, "pnphandle=%d", idev->id_pnphandle); + sbuf_printf(sb, "pnphandle=%d", idev->id_pnphandle); #endif - /* Nothing here yet */ - *buf = '\0'; return (0); } static device_method_t isa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, isa_probe), DEVMETHOD(device_attach, isa_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_add_child, isa_add_child), DEVMETHOD(bus_print_child, isa_print_child), DEVMETHOD(bus_probe_nomatch, isa_probe_nomatch), DEVMETHOD(bus_read_ivar, isa_read_ivar), DEVMETHOD(bus_write_ivar, isa_write_ivar), DEVMETHOD(bus_child_detached, isa_child_detached), DEVMETHOD(bus_driver_added, isa_driver_added), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_get_resource_list,isa_get_resource_list), DEVMETHOD(bus_alloc_resource, isa_alloc_resource), DEVMETHOD(bus_release_resource, isa_release_resource), DEVMETHOD(bus_set_resource, isa_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, bus_generic_rl_delete_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), - DEVMETHOD(bus_child_pnpinfo_str, isa_child_pnpinfo_str), - DEVMETHOD(bus_child_location_str, isa_child_location_str), + DEVMETHOD(bus_child_pnpinfo, isa_child_pnpinfo), + DEVMETHOD(bus_child_location, isa_child_location), DEVMETHOD(bus_hinted_child, isa_hinted_child), DEVMETHOD(bus_hint_device_unit, isa_hint_device_unit), /* ISA interface */ DEVMETHOD(isa_add_config, isa_add_config), DEVMETHOD(isa_set_config_callback, isa_set_config_callback), DEVMETHOD(isa_pnp_probe, isa_pnp_probe), { 0, 0 } }; DEFINE_CLASS_0(isa, isa_driver, isa_methods, 0); devclass_t isa_devclass; /* * ISA can be attached to a PCI-ISA bridge, or other locations on some * platforms. */ DRIVER_MODULE(isa, isab, isa_driver, isa_devclass, 0, 0); DRIVER_MODULE(isa, eisab, isa_driver, isa_devclass, 0, 0); MODULE_VERSION(isa, 1);
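/*
 * Example (editor's illustration, never compiled): the same conversion
 * pattern applies to any other bus that implemented the old
 * string-based methods.  The "foo" names are hypothetical;
 * sbuf_printf(), sbuf_putc() and devctl_safe_quote_sb() are the real
 * interfaces.  A resulting pnpinfo string might look like:
 * vendor=0x1234 model=0x5678 desc="Some device"
 */
#ifdef EXAMPLE_ONLY
struct foo_ivars {
	uint16_t	vendor;
	uint16_t	model;
	const char	*desc;
};

static int
foo_child_pnpinfo(device_t bus, device_t child, struct sbuf *sb)
{
	struct foo_ivars *ivars = device_get_ivars(child);

	/* Append key=value pairs; quote the free-form description. */
	sbuf_printf(sb, "vendor=0x%04x model=0x%04x desc=\"",
	    ivars->vendor, ivars->model);
	devctl_safe_quote_sb(sb, ivars->desc);
	sbuf_putc(sb, '"');
	return (0);
}
#endif
/* * Code common to ISA bridges.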
*/ devclass_t isab_devclass; int isab_attach(device_t dev) { device_t child; child = device_add_child(dev, "isa", 0); if (child != NULL) return (bus_generic_attach(dev)); return (ENXIO); } char * pnp_eisaformat(uint32_t id) { uint8_t *data; static char idbuf[8]; const char hextoascii[] = "0123456789abcdef"; id = htole32(id); data = (uint8_t *)&id; idbuf[0] = '@' + ((data[0] & 0x7c) >> 2); idbuf[1] = '@' + (((data[0] & 0x3) << 3) + ((data[1] & 0xe0) >> 5)); idbuf[2] = '@' + (data[1] & 0x1f); idbuf[3] = hextoascii[(data[2] >> 4)]; idbuf[4] = hextoascii[(data[2] & 0xf)]; idbuf[5] = hextoascii[(data[3] >> 4)]; idbuf[6] = hextoascii[(data[3] & 0xf)]; idbuf[7] = 0; return(idbuf); } diff --git a/sys/kern/bus_if.m b/sys/kern/bus_if.m index 250f2192e573..71d83954ef4d 100644 --- a/sys/kern/bus_if.m +++ b/sys/kern/bus_if.m @@ -1,934 +1,926 @@ #- # Copyright (c) 1998-2004 Doug Rabson # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # # $FreeBSD$ # #include <sys/types.h> #include <sys/systm.h> #include <sys/bus.h> /** * @defgroup BUS bus - KObj methods for drivers of devices with children * @brief A set of methods required by device drivers that support * child devices. * @{ */ INTERFACE bus; # # Default implementations of some methods. # CODE { static struct resource * null_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { return (0); } static int null_remap_intr(device_t bus, device_t dev, u_int irq) { if (dev != NULL) return (BUS_REMAP_INTR(dev, NULL, irq)); return (ENXIO); } static device_t null_add_child(device_t bus, int order, const char *name, int unit) { panic("bus_add_child is not implemented"); } static int null_reset_post(device_t bus, device_t dev) { return (0); } static int null_reset_prepare(device_t bus, device_t dev) { return (0); } static int null_translate_resource(device_t bus, int type, rman_res_t start, rman_res_t *newstart) { if (device_get_parent(bus) != NULL) return (BUS_TRANSLATE_RESOURCE(device_get_parent(bus), type, start, newstart)); *newstart = start; return (0); } }; /** * @brief Print a description of a child device * * This is called from system code which prints out a description of a * device. It should describe the attachment that the child has with * the parent.
For instance the TurboLaser bus prints which node the * device is attached to. See bus_generic_print_child() for more * information. * * @param _dev the device whose child is being printed * @param _child the child device to describe * * @returns the number of characters output. */ METHOD int print_child { device_t _dev; device_t _child; } DEFAULT bus_generic_print_child; /** * @brief Print a notification about an unprobed child device. * * Called for each child device that did not succeed in probing for a * driver. * * @param _dev the device whose child was being probed * @param _child the child device which failed to probe */ METHOD void probe_nomatch { device_t _dev; device_t _child; }; /** * @brief Read the value of a bus-specific attribute of a device * * This method, along with BUS_WRITE_IVAR() manages a bus-specific set * of instance variables of a child device. The intention is that * each different type of bus defines a set of appropriate instance * variables (such as ports and irqs for ISA bus etc.) * * This information could be given to the child device as a struct but * that makes it hard for a bus to add or remove variables without * forcing an edit and recompile for all drivers which may not be * possible for vendor supplied binary drivers. * * This method copies the value of an instance variable to the * location specified by @p *_result. * * @param _dev the device whose child was being examined * @param _child the child device whose instance variable is * being read * @param _index the instance variable to read * @param _result a location to receive the instance variable * value * * @retval 0 success * @retval ENOENT no such instance variable is supported by @p * _dev */ METHOD int read_ivar { device_t _dev; device_t _child; int _index; uintptr_t *_result; }; /** * @brief Write the value of a bus-specific attribute of a device * * This method sets the value of an instance variable to @p _value. * * @param _dev the device whose child was being updated * @param _child the child device whose instance variable is * being written * @param _index the instance variable to write * @param _value the value to write to that instance variable * * @retval 0 success * @retval ENOENT no such instance variable is supported by @p * _dev * @retval EINVAL the instance variable was recognised but * contains a read-only value */ METHOD int write_ivar { device_t _dev; device_t _child; int _indx; uintptr_t _value; }; /** * @brief Notify a bus that a child was deleted * * Called at the beginning of device_delete_child() to allow the parent * to teardown any bus-specific state for the child. * * @param _dev the device whose child is being deleted * @param _child the child device which is being deleted */ METHOD void child_deleted { device_t _dev; device_t _child; }; /** * @brief Notify a bus that a child was detached * * Called after the child's DEVICE_DETACH() method to allow the parent * to reclaim any resources allocated on behalf of the child. * * @param _dev the device whose child changed state * @param _child the child device which changed state */ METHOD void child_detached { device_t _dev; device_t _child; }; /** * @brief Notify a bus that a new driver was added * * Called when a new driver is added to the devclass which owns this * bus. The generic implementation of this method attempts to probe and * attach any un-matched children of the bus. 
* * @param _dev the device whose devclass had a new driver * added to it * @param _driver the new driver which was added */ METHOD void driver_added { device_t _dev; driver_t *_driver; } DEFAULT bus_generic_driver_added; /** * @brief Create a new child device * * For buses which use drivers supporting DEVICE_IDENTIFY() to * enumerate their devices, this method is used to create new * device instances. The new device will be added after the last * existing child with the same order. Implementations of bus_add_child * call device_add_child_ordered to add the child and often add * a suitable ivar to the device specific to that bus. * * @param _dev the bus device which will be the parent of the * new child device * @param _order a value which is used to partially sort the * children of @p _dev - devices created using * lower values of @p _order appear first in @p * _dev's list of children * @param _name devclass name for new device or @c NULL if not * specified * @param _unit unit number for new device or @c -1 if not * specified */ METHOD device_t add_child { device_t _dev; u_int _order; const char *_name; int _unit; } DEFAULT null_add_child; /** * @brief Rescan the bus * * This method is called by a parent bridge or devctl to trigger a bus * rescan. The rescan should delete devices no longer present and * enumerate devices that have newly arrived. * * @param _dev the bus device */ METHOD int rescan { device_t _dev; } /** * @brief Allocate a system resource * * This method is called by child devices of a bus to allocate resources. * The types are defined in <machine/resource.h>; the meaning of the * resource-ID field varies from bus to bus (but @p *rid == 0 is always * valid if the resource type is). If a resource was allocated and the * caller did not use the RF_ACTIVE flag to specify that it should be * activated immediately, the caller is responsible for calling * BUS_ACTIVATE_RESOURCE() when it actually uses the resource. * * @param _dev the parent device of @p _child * @param _child the device which is requesting an allocation * @param _type the type of resource to allocate * @param _rid a pointer to the resource identifier * @param _start hint at the start of the resource range - pass * @c 0 for any start address * @param _end hint at the end of the resource range - pass * @c ~0 for any end address * @param _count hint at the size of range required - pass @c 1 * for any size * @param _flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * <sys/rman.h> for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ METHOD struct resource * alloc_resource { device_t _dev; device_t _child; int _type; int *_rid; rman_res_t _start; rman_res_t _end; rman_res_t _count; u_int _flags; } DEFAULT null_alloc_resource;
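/*
 * Example (editor's illustration, not part of the interface): a leaf
 * driver normally reaches this method through the
 * bus_alloc_resource_any() convenience wrapper, which forwards the
 * request up the device tree:
 *
 *	struct resource *mem, *irq;
 *	int mem_rid = 0, irq_rid = 0;
 *
 *	mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &mem_rid,
 *	    RF_ACTIVE);
 *	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq_rid,
 *	    RF_ACTIVE | RF_SHAREABLE);
 *	if (mem == NULL || irq == NULL)
 *		return (ENXIO);
 */
/** * @brief Activate a resource * * Activate a resource previously allocated with * BUS_ALLOC_RESOURCE(). This may enable decoding of this resource in a * device for instance. It will also establish a mapping for the resource * unless RF_UNMAPPED was set when allocating the resource. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _type the type of resource * @param _rid the resource identifier * @param _r the resource to activate */ METHOD int activate_resource { device_t _dev; device_t _child; int _type; int _rid; struct resource *_r; }; /** * @brief Map a resource * * Allocate a mapping for a range of an active resource.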
The mapping * is described by a struct resource_map object. This may for instance * map a memory region into the kernel's virtual address space. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _type the type of resource * @param _r the resource to map * @param _args optional attributes of the mapping * @param _map the mapping */ METHOD int map_resource { device_t _dev; device_t _child; int _type; struct resource *_r; struct resource_map_request *_args; struct resource_map *_map; } DEFAULT bus_generic_map_resource; /** * @brief Unmap a resource * * Release a mapping previously allocated with * BUS_MAP_RESOURCE(). This may for instance unmap a memory region * from the kernel's virtual address space. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _type the type of resource * @param _r the resource * @param _map the mapping to release */ METHOD int unmap_resource { device_t _dev; device_t _child; int _type; struct resource *_r; struct resource_map *_map; } DEFAULT bus_generic_unmap_resource; /** * @brief Deactivate a resource * * Deactivate a resource previously allocated with * BUS_ALLOC_RESOURCE(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _type the type of resource * @param _rid the resource identifier * @param _r the resource to deactivate */ METHOD int deactivate_resource { device_t _dev; device_t _child; int _type; int _rid; struct resource *_r; }; /** * @brief Adjust a resource * * Adjust the start and/or end of a resource allocated by * BUS_ALLOC_RESOURCE(). At least part of the new address range must overlap * with the existing address range. If successful, the resource's range * will be adjusted to [start, end] on return. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _type the type of resource * @param _res the resource to adjust * @param _start the new starting address of the resource range * @param _end the new ending address of the resource range */ METHOD int adjust_resource { device_t _dev; device_t _child; int _type; struct resource *_res; rman_res_t _start; rman_res_t _end; }; /** * @brief Translate a resource value * * @param _dev the device associated with the resource * @param _type the type of resource * @param _start the starting address of the resource range * @param _newstart the new starting address of the resource range */ METHOD int translate_resource { device_t _dev; int _type; rman_res_t _start; rman_res_t *_newstart; } DEFAULT null_translate_resource; /** * @brief Release a resource * * Free a resource allocated by BUS_ALLOC_RESOURCE(). The @p _rid * value must be the same as the one returned by BUS_ALLOC_RESOURCE() * (which is not necessarily the same as the one the client passed). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _type the type of resource * @param _rid the resource identifier * @param _r the resource to release */ METHOD int release_resource { device_t _dev; device_t _child; int _type; int _rid; struct resource *_res; };
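/*
 * Example (editor's illustration): drivers normally pair the interrupt
 * methods below with BUS_RELEASE_RESOURCE() above through the generic
 * wrappers; "sc", foo_intr and irq_rid are hypothetical driver state:
 *
 *	error = bus_setup_intr(dev, irq, INTR_TYPE_MISC | INTR_MPSAFE,
 *	    NULL, foo_intr, sc, &sc->intrhand);
 *
 *	... and on detach:
 *
 *	bus_teardown_intr(dev, irq, sc->intrhand);
 *	bus_release_resource(dev, SYS_RES_IRQ, irq_rid, irq);
 */
/** * @brief Install an interrupt handler * * This method is used to associate an interrupt handler function with * an irq resource. When the interrupt triggers, the function @p _intr * will be called with the value of @p _arg as its single * argument.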
The value returned in @p *_cookiep is used to cancel the * interrupt handler - the caller should save this value to use in a * future call to BUS_TEARDOWN_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _flags a set of bits from enum intr_type specifying * the class of interrupt * @param _intr the function to call when the interrupt * triggers * @param _arg a value to use as the single argument in calls * to @p _intr * @param _cookiep a pointer to a location to receive a cookie * value that may be used to remove the interrupt * handler */ METHOD int setup_intr { device_t _dev; device_t _child; struct resource *_irq; int _flags; driver_filter_t *_filter; driver_intr_t *_intr; void *_arg; void **_cookiep; }; /** * @brief Uninstall an interrupt handler * * This method is used to disassociate an interrupt handler function * with an irq resource. The value of @p _cookie must be the value * returned from a previous call to BUS_SETUP_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _cookie the cookie value returned when the interrupt * was originally registered */ METHOD int teardown_intr { device_t _dev; device_t _child; struct resource *_irq; void *_cookie; }; /** * @brief Suspend an interrupt handler * * This method is used to mark a handler as suspended in the case * that the associated device is powered down and cannot be a source * for the, typically shared, interrupt. * The value of @p _irq must be the interrupt resource passed * to a previous call to BUS_SETUP_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt */ METHOD int suspend_intr { device_t _dev; device_t _child; struct resource *_irq; } DEFAULT bus_generic_suspend_intr; /** * @brief Resume an interrupt handler * * This method is used to clear suspended state of a handler when * the associated device is powered up and can be an interrupt source * again. * The value of @p _irq must be the interrupt resource passed * to a previous call to BUS_SETUP_INTR(). * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt */ METHOD int resume_intr { device_t _dev; device_t _child; struct resource *_irq; } DEFAULT bus_generic_resume_intr; /** * @brief Define a resource which can be allocated with * BUS_ALLOC_RESOURCE(). * * This method is used by some buses (typically ISA) to allow a * driver to describe a resource range that it would like to * allocate. The resource defined by @p _type and @p _rid is defined * to start at @p _start and to include @p _count indices in its * range. * * @param _dev the parent device of @p _child * @param _child the device which owns the resource * @param _type the type of resource * @param _rid the resource identifier * @param _start the start of the resource range * @param _count the size of the resource range */ METHOD int set_resource { device_t _dev; device_t _child; int _type; int _rid; rman_res_t _start; rman_res_t _count; }; /** * @brief Describe a resource * * This method allows a driver to examine the range used for a given * resource without actually allocating it. 
* * @param _dev the parent device of @p _child * @param _child the device which owns the resource * @param _type the type of resource * @param _rid the resource identifier * @param _start the address of a location to receive the start * index of the resource range * @param _count the address of a location to receive the size * of the resource range */ METHOD int get_resource { device_t _dev; device_t _child; int _type; int _rid; rman_res_t *_startp; rman_res_t *_countp; }; /** * @brief Delete a resource. * * Use this to delete a resource (possibly one previously added with * BUS_SET_RESOURCE()). * * @param _dev the parent device of @p _child * @param _child the device which owns the resource * @param _type the type of resource * @param _rid the resource identifier */ METHOD void delete_resource { device_t _dev; device_t _child; int _type; int _rid; }; /** * @brief Return a struct resource_list. * * Used by drivers which use bus_generic_rl_alloc_resource() etc. to * implement their resource handling. It should return the resource * list of the given child device. * * @param _dev the parent device of @p _child * @param _child the device which owns the resource list */ METHOD struct resource_list * get_resource_list { device_t _dev; device_t _child; } DEFAULT bus_generic_get_resource_list; /** * @brief Is the hardware described by @p _child still attached to the * system? * * This method should return 0 if the device is not present. It * should return -1 if it is present. Any errors in determining * should be returned as a normal errno value. Client drivers are to * assume that the device is present, even if there is an error * determining if it is there. Buses are to try to avoid returning * errors, but newcard will return an error if the device fails to * implement this method. * * @param _dev the parent device of @p _child * @param _child the device which is being examined */ METHOD int child_present { device_t _dev; device_t _child; } DEFAULT bus_generic_child_present; /** * @brief Returns the pnp info for this device. * - * Return it as a string. If the storage is insufficient for the - * string, then return EOVERFLOW. + * Return it as a string, appended to @p _sb * * The string must be formatted as a space-separated list of * name=value pairs. Names may only contain alphanumeric characters, * underscores ('_') and hyphens ('-'). Values can contain any * non-whitespace characters. Values containing whitespace can be * quoted with double quotes ('"'). Double quotes and backslashes in * quoted values can be escaped with backslashes ('\'). * * @param _dev the parent device of @p _child * @param _child the device which is being examined - * @param _buf the address of a buffer to receive the pnp - * string - * @param _buflen the size of the buffer pointed to by @p _buf + * @param _sb sbuf for results string */ -METHOD int child_pnpinfo_str { +METHOD int child_pnpinfo { device_t _dev; device_t _child; - char *_buf; - size_t _buflen; -}; + struct sbuf *_sb; +} DEFAULT bus_generic_child_pnpinfo; /** * @brief Returns the location for this device. * - * Return it as a string. If the storage is insufficient for the - * string, then return EOVERFLOW. + * Return it as a string, appended to @p _sb * * The string must be formatted as a space-separated list of * name=value pairs. Names may only contain alphanumeric characters, * underscores ('_') and hyphens ('-'). Values can contain any * non-whitespace characters. Values containing whitespace can be * quoted with double quotes ('"'). 
Double quotes and backslashes in * quoted values can be escaped with backslashes ('\'). * * @param _dev the parent device of @p _child * @param _child the device which is being examined - * @param _buf the address of a buffer to receive the location - * string - * @param _buflen the size of the buffer pointed to by @p _buf + * @param _sb sbuf for results string */ -METHOD int child_location_str { +METHOD int child_location { device_t _dev; device_t _child; - char *_buf; - size_t _buflen; -}; + struct sbuf *_sb; +} DEFAULT bus_generic_child_location; /** * @brief Allow drivers to request that an interrupt be bound to a specific * CPU. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _cpu the CPU to bind the interrupt to */ METHOD int bind_intr { device_t _dev; device_t _child; struct resource *_irq; int _cpu; } DEFAULT bus_generic_bind_intr; /** * @brief Allow (bus) drivers to specify the trigger mode and polarity * of the specified interrupt. * * @param _dev the bus device * @param _irq the interrupt number to modify * @param _trig the trigger mode required * @param _pol the interrupt polarity required */ METHOD int config_intr { device_t _dev; int _irq; enum intr_trigger _trig; enum intr_polarity _pol; } DEFAULT bus_generic_config_intr; /** * @brief Allow drivers to associate a description with an active * interrupt handler. * * @param _dev the parent device of @p _child * @param _child the device which allocated the resource * @param _irq the resource representing the interrupt * @param _cookie the cookie value returned when the interrupt * was originally registered * @param _descr the description to associate with the interrupt */ METHOD int describe_intr { device_t _dev; device_t _child; struct resource *_irq; void *_cookie; const char *_descr; } DEFAULT bus_generic_describe_intr; /** * @brief Notify a (bus) driver about a child that the hints mechanism * believes it has discovered. * * The bus is responsible for then adding the child in the right order * and discovering other things about the child. The bus driver is * free to ignore this hint, to do special things, etc. It is all up * to the bus driver to interpret. * * This method is only called in response to the parent bus asking for * hinted devices to be enumerated. * * @param _dev the bus device * @param _dname the name of the device w/o unit numbers * @param _dunit the unit number of the device */ METHOD void hinted_child { device_t _dev; const char *_dname; int _dunit; }; /** * @brief Returns bus_dma_tag_t for use w/ devices on the bus. * * @param _dev the parent device of @p _child * @param _child the device to which the tag will belong */ METHOD bus_dma_tag_t get_dma_tag { device_t _dev; device_t _child; } DEFAULT bus_generic_get_dma_tag; /** * @brief Returns bus_space_tag_t for use w/ devices on the bus. * * @param _dev the parent device of @p _child * @param _child the device to which the tag will belong */ METHOD bus_space_tag_t get_bus_tag { device_t _dev; device_t _child; } DEFAULT bus_generic_get_bus_tag; /** * @brief Allow the bus to determine the unit number of a device. 
* * @param _dev the parent device of @p _child * @param _child the device whose unit is to be wired * @param _name the name of the device's new devclass * @param _unitp a pointer to the device's new unit value */ METHOD void hint_device_unit { device_t _dev; device_t _child; const char *_name; int *_unitp; }; /** * @brief Notify a bus that the bus pass level has been changed * * @param _dev the bus device */ METHOD void new_pass { device_t _dev; } DEFAULT bus_generic_new_pass; /** * @brief Notify a bus that specified child's IRQ should be remapped. * * @param _dev the bus device * @param _child the child device * @param _irq the irq number */ METHOD int remap_intr { device_t _dev; device_t _child; u_int _irq; } DEFAULT null_remap_intr; /** * @brief Suspend a given child * * @param _dev the parent device of @p _child * @param _child the device to suspend */ METHOD int suspend_child { device_t _dev; device_t _child; } DEFAULT bus_generic_suspend_child; /** * @brief Resume a given child * * @param _dev the parent device of @p _child * @param _child the device to resume */ METHOD int resume_child { device_t _dev; device_t _child; } DEFAULT bus_generic_resume_child; /** * @brief Get the VM domain handle for the given bus and child. * * @param _dev the bus device * @param _child the child device * @param _domain a pointer to the bus's domain handle identifier */ METHOD int get_domain { device_t _dev; device_t _child; int *_domain; } DEFAULT bus_generic_get_domain; /** * @brief Request a set of CPUs * * @param _dev the bus device * @param _child the child device * @param _op type of CPUs to request * @param _setsize the size of the set passed in _cpuset * @param _cpuset a pointer to a cpuset to receive the requested * set of CPUs */ METHOD int get_cpus { device_t _dev; device_t _child; enum cpu_sets _op; size_t _setsize; struct _cpuset *_cpuset; } DEFAULT bus_generic_get_cpus; /** * @brief Prepares the given child of the bus for reset * * Typically bus detaches or suspends children' drivers, and then * calls this method to save bus-specific information, for instance, * PCI config space, which is damaged by reset. * * The bus_helper_reset_prepare() helper is provided to ease * implementing bus reset methods. * * @param _dev the bus device * @param _child the child device */ METHOD int reset_prepare { device_t _dev; device_t _child; } DEFAULT null_reset_prepare; /** * @brief Restores the child operations after the reset * * The bus_helper_reset_post() helper is provided to ease * implementing bus reset methods. * * @param _dev the bus device * @param _child the child device */ METHOD int reset_post { device_t _dev; device_t _child; } DEFAULT null_reset_post; /** * @brief Performs reset of the child * * @param _dev the bus device * @param _child the child device * @param _flags DEVF_RESET_ flags */ METHOD int reset_child { device_t _dev; device_t _child; int _flags; }; diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c index 12399df40a5d..fc2048561cf6 100644 --- a/sys/kern/subr_bus.c +++ b/sys/kern/subr_bus.c @@ -1,6077 +1,6029 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997,1998,2003 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_bus.h" #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw, OID_AUTO, bus, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); SYSCTL_ROOT_NODE(OID_AUTO, dev, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, NULL); /* * Used to attach drivers to devclasses. */ typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ int pass; int flags; #define DL_DEFERRED_PROBE 1 /* Probe deferred on this */ TAILQ_ENTRY(driverlink) passlink; }; /* * Forward declarations */ typedef TAILQ_HEAD(devclass_list, devclass) devclass_list_t; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; typedef TAILQ_HEAD(device_list, _device) device_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ int flags; #define DC_HAS_CHILDREN 1 struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; }; /** * @brief Implementation of _device. * * The structure is named "_device" instead of "device" to avoid type confusion * caused by other subsystems defining a (struct device). */ struct _device { /* * A device is a kernel object. The first field must be the * current ops table for the object. */ KOBJ_FIELDS; /* * Device hierarchy. */ TAILQ_ENTRY(_device) link; /**< list of devices in parent */ TAILQ_ENTRY(_device) devlink; /**< global device list membership */ device_t parent; /**< parent of this device */ device_list_t children; /**< list of child devices */ /* * Details of this device. */ driver_t *driver; /**< current driver */ devclass_t devclass; /**< current device class */ int unit; /**< current unit number */ char* nameunit; /**< name+unit e.g. 
foodev0 */ char* desc; /**< driver specific description */ int busy; /**< count of calls to device_busy() */ device_state_t state; /**< current device state */ uint32_t devflags; /**< api level flags for device_get_flags() */ u_int flags; /**< internal device flags */ u_int order; /**< order from device_add_child_ordered() */ void *ivars; /**< instance variables */ void *softc; /**< current driver's variables */ struct sysctl_ctx_list sysctl_ctx; /**< state for sysctl variables */ struct sysctl_oid *sysctl_tree; /**< state for sysctl variables */ }; static MALLOC_DEFINE(M_BUS, "bus", "Bus data structures"); static MALLOC_DEFINE(M_BUS_SC, "bus-sc", "Bus data structures, softc"); EVENTHANDLER_LIST_DEFINE(device_attach); EVENTHANDLER_LIST_DEFINE(device_detach); EVENTHANDLER_LIST_DEFINE(dev_lookup); -static int bus_child_location_sb(device_t child, struct sbuf *sb); -static int bus_child_pnpinfo_sb(device_t child, struct sbuf *sb); static void devctl2_init(void); static bool device_frozen; #define DRIVERNAME(d) ((d)? d->name : "no driver") #define DEVCLANAME(d) ((d)? d->name : "no devclass") #ifdef BUS_DEBUG static int bus_debug = 1; SYSCTL_INT(_debug, OID_AUTO, bus_debug, CTLFLAG_RWTUN, &bus_debug, 0, "Bus debug level"); #define PDEBUG(a) if (bus_debug) {printf("%s:%d: ", __func__, __LINE__), printf a; printf("\n");} #define DEVICENAME(d) ((d)? device_get_name(d): "no device") /** * Produce the indenting, indent*2 spaces plus a '.' ahead of that to * prevent syslog from deleting initial spaces */ #define indentprintf(p) do { int iJ; printf("."); for (iJ=0; iJparent ? dc->parent->name : ""; break; default: return (EINVAL); } return (SYSCTL_OUT_STR(req, value)); } static void devclass_sysctl_init(devclass_t dc) { if (dc->sysctl_tree != NULL) return; sysctl_ctx_init(&dc->sysctl_ctx); dc->sysctl_tree = SYSCTL_ADD_NODE(&dc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), OID_AUTO, dc->name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); SYSCTL_ADD_PROC(&dc->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, dc, DEVCLASS_SYSCTL_PARENT, devclass_sysctl_handler, "A", "parent class"); } enum { DEVICE_SYSCTL_DESC, DEVICE_SYSCTL_DRIVER, DEVICE_SYSCTL_LOCATION, DEVICE_SYSCTL_PNPINFO, DEVICE_SYSCTL_PARENT, }; static int device_sysctl_handler(SYSCTL_HANDLER_ARGS) { struct sbuf sb; device_t dev = (device_t)arg1; int error; sbuf_new_for_sysctl(&sb, NULL, 1024, req); sbuf_clear_flags(&sb, SBUF_INCLUDENUL); switch (arg2) { case DEVICE_SYSCTL_DESC: sbuf_cat(&sb, dev->desc ? dev->desc : ""); break; case DEVICE_SYSCTL_DRIVER: sbuf_cat(&sb, dev->driver ? dev->driver->name : ""); break; case DEVICE_SYSCTL_LOCATION: - bus_child_location_sb(dev, &sb); + bus_child_location(dev, &sb); break; case DEVICE_SYSCTL_PNPINFO: - bus_child_pnpinfo_sb(dev, &sb); + bus_child_pnpinfo(dev, &sb); break; case DEVICE_SYSCTL_PARENT: sbuf_cat(&sb, dev->parent ? 
dev->parent->nameunit : ""); break; default: sbuf_delete(&sb); return (EINVAL); } error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } static void device_sysctl_init(device_t dev) { devclass_t dc = dev->devclass; int domain; if (dev->sysctl_tree != NULL) return; devclass_sysctl_init(dc); sysctl_ctx_init(&dev->sysctl_ctx); dev->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&dev->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, dev->nameunit + strlen(dc->name), CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "", "device_index"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%desc", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, dev, DEVICE_SYSCTL_DESC, device_sysctl_handler, "A", "device description"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%driver", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, dev, DEVICE_SYSCTL_DRIVER, device_sysctl_handler, "A", "device driver name"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%location", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, dev, DEVICE_SYSCTL_LOCATION, device_sysctl_handler, "A", "device location relative to parent"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%pnpinfo", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, dev, DEVICE_SYSCTL_PNPINFO, device_sysctl_handler, "A", "device identification"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, dev, DEVICE_SYSCTL_PARENT, device_sysctl_handler, "A", "parent device"); if (bus_get_domain(dev, &domain) == 0) SYSCTL_ADD_INT(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%domain", CTLFLAG_RD, NULL, domain, "NUMA domain"); } static void device_sysctl_update(device_t dev) { devclass_t dc = dev->devclass; if (dev->sysctl_tree == NULL) return; sysctl_rename_oid(dev->sysctl_tree, dev->nameunit + strlen(dc->name)); } static void device_sysctl_fini(device_t dev) { if (dev->sysctl_tree == NULL) return; sysctl_ctx_free(&dev->sysctl_ctx); dev->sysctl_tree = NULL; } /* * /dev/devctl implementation */ /* * This design allows only one reader for /dev/devctl. This is not desirable * in the long run, but will get a lot of hair out of this implementation. * Maybe we should make this device a clonable device. * * Also note: we specifically do not attach a device to the device_t tree * to avoid potential chicken and egg problems. One could argue that all * of this belongs to the root node. 
*/ #define DEVCTL_DEFAULT_QUEUE_LEN 1000 static int sysctl_devctl_queue(SYSCTL_HANDLER_ARGS); static int devctl_queue_length = DEVCTL_DEFAULT_QUEUE_LEN; SYSCTL_PROC(_hw_bus, OID_AUTO, devctl_queue, CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 0, sysctl_devctl_queue, "I", "devctl queue length"); static d_open_t devopen; static d_close_t devclose; static d_read_t devread; static d_ioctl_t devioctl; static d_poll_t devpoll; static d_kqfilter_t devkqfilter; static struct cdevsw dev_cdevsw = { .d_version = D_VERSION, .d_open = devopen, .d_close = devclose, .d_read = devread, .d_ioctl = devioctl, .d_poll = devpoll, .d_kqfilter = devkqfilter, .d_name = "devctl", }; #define DEVCTL_BUFFER (1024 - sizeof(void *)) struct dev_event_info { STAILQ_ENTRY(dev_event_info) dei_link; char dei_data[DEVCTL_BUFFER]; }; STAILQ_HEAD(devq, dev_event_info); static struct dev_softc { int inuse; int nonblock; int queued; int async; struct mtx mtx; struct cv cv; struct selinfo sel; struct devq devq; struct sigio *sigio; uma_zone_t zone; } devsoftc; static void filt_devctl_detach(struct knote *kn); static int filt_devctl_read(struct knote *kn, long hint); struct filterops devctl_rfiltops = { .f_isfd = 1, .f_detach = filt_devctl_detach, .f_event = filt_devctl_read, }; static struct cdev *devctl_dev; static void devinit(void) { int reserve; uma_zone_t z; devctl_dev = make_dev_credf(MAKEDEV_ETERNAL, &dev_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600, "devctl"); mtx_init(&devsoftc.mtx, "dev mtx", "devd", MTX_DEF); cv_init(&devsoftc.cv, "dev cv"); STAILQ_INIT(&devsoftc.devq); knlist_init_mtx(&devsoftc.sel.si_note, &devsoftc.mtx); if (devctl_queue_length > 0) { /* * Allocate a zone for the messages. Preallocate 2% of these for * a reserve. Allow only devctl_queue_length slabs to cap memory * usage. The reserve usually allows coverage of surges of * events during memory shortages. Normally we won't have to * re-use events from the queue, but will in extreme shortages. */ z = devsoftc.zone = uma_zcreate("DEVCTL", sizeof(struct dev_event_info), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); reserve = max(devctl_queue_length / 50, 100); /* 2% reserve */ uma_zone_set_max(z, devctl_queue_length); uma_zone_set_maxcache(z, 0); uma_zone_reserve(z, reserve); uma_prealloc(z, reserve); } devctl2_init(); } static int devopen(struct cdev *dev, int oflags, int devtype, struct thread *td) { mtx_lock(&devsoftc.mtx); if (devsoftc.inuse) { mtx_unlock(&devsoftc.mtx); return (EBUSY); } /* move to init */ devsoftc.inuse = 1; mtx_unlock(&devsoftc.mtx); return (0); } static int devclose(struct cdev *dev, int fflag, int devtype, struct thread *td) { mtx_lock(&devsoftc.mtx); devsoftc.inuse = 0; devsoftc.nonblock = 0; devsoftc.async = 0; cv_broadcast(&devsoftc.cv); funsetown(&devsoftc.sigio); mtx_unlock(&devsoftc.mtx); return (0); } /* * The read channel for this device is used to report changes to * userland in realtime. We are required to free the data as well as * the n1 object because we allocate them separately. Also note that * we return one record at a time. If you try to read this device a * character at a time, you will lose the rest of the data. Listening * programs are expected to cope. */ static int devread(struct cdev *dev, struct uio *uio, int ioflag) { struct dev_event_info *n1; int rv; mtx_lock(&devsoftc.mtx); while (STAILQ_EMPTY(&devsoftc.devq)) { if (devsoftc.nonblock) { mtx_unlock(&devsoftc.mtx); return (EAGAIN); } rv = cv_wait_sig(&devsoftc.cv, &devsoftc.mtx); if (rv) { /* * Need to translate ERESTART to EINTR here? 
-- jake */ mtx_unlock(&devsoftc.mtx); return (rv); } } n1 = STAILQ_FIRST(&devsoftc.devq); STAILQ_REMOVE_HEAD(&devsoftc.devq, dei_link); devsoftc.queued--; mtx_unlock(&devsoftc.mtx); rv = uiomove(n1->dei_data, strlen(n1->dei_data), uio); uma_zfree(devsoftc.zone, n1); return (rv); } static int devioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { switch (cmd) { case FIONBIO: if (*(int*)data) devsoftc.nonblock = 1; else devsoftc.nonblock = 0; return (0); case FIOASYNC: if (*(int*)data) devsoftc.async = 1; else devsoftc.async = 0; return (0); case FIOSETOWN: return fsetown(*(int *)data, &devsoftc.sigio); case FIOGETOWN: *(int *)data = fgetown(&devsoftc.sigio); return (0); /* (un)Support for other fcntl() calls. */ case FIOCLEX: case FIONCLEX: case FIONREAD: default: break; } return (ENOTTY); } static int devpoll(struct cdev *dev, int events, struct thread *td) { int revents = 0; mtx_lock(&devsoftc.mtx); if (events & (POLLIN | POLLRDNORM)) { if (!STAILQ_EMPTY(&devsoftc.devq)) revents = events & (POLLIN | POLLRDNORM); else selrecord(td, &devsoftc.sel); } mtx_unlock(&devsoftc.mtx); return (revents); } static int devkqfilter(struct cdev *dev, struct knote *kn) { int error; if (kn->kn_filter == EVFILT_READ) { kn->kn_fop = &devctl_rfiltops; knlist_add(&devsoftc.sel.si_note, kn, 0); error = 0; } else error = EINVAL; return (error); } static void filt_devctl_detach(struct knote *kn) { knlist_remove(&devsoftc.sel.si_note, kn, 0); } static int filt_devctl_read(struct knote *kn, long hint) { kn->kn_data = devsoftc.queued; return (kn->kn_data != 0); } /** * @brief Return whether the userland process is running */ bool devctl_process_running(void) { return (devsoftc.inuse == 1); } static struct dev_event_info * devctl_alloc_dei(void) { struct dev_event_info *dei = NULL; mtx_lock(&devsoftc.mtx); if (devctl_queue_length == 0) goto out; dei = uma_zalloc(devsoftc.zone, M_NOWAIT); if (dei == NULL) dei = uma_zalloc(devsoftc.zone, M_NOWAIT | M_USE_RESERVE); if (dei == NULL) { /* * Guard against no items in the queue. Normally, this won't * happen, but if lots of events happen all at once and there's * a chance we're out of allocated space but none have yet been * queued when we get here, leaving nothing to steal. This can * also happen with error injection. Fail safe by returning * NULL in that case.. 
*/ if (devsoftc.queued == 0) goto out; dei = STAILQ_FIRST(&devsoftc.devq); STAILQ_REMOVE_HEAD(&devsoftc.devq, dei_link); devsoftc.queued--; } MPASS(dei != NULL); *dei->dei_data = '\0'; out: mtx_unlock(&devsoftc.mtx); return (dei); } static struct dev_event_info * devctl_alloc_dei_sb(struct sbuf *sb) { struct dev_event_info *dei; dei = devctl_alloc_dei(); if (dei != NULL) sbuf_new(sb, dei->dei_data, sizeof(dei->dei_data), SBUF_FIXEDLEN); return (dei); } static void devctl_free_dei(struct dev_event_info *dei) { uma_zfree(devsoftc.zone, dei); } static void devctl_queue(struct dev_event_info *dei) { mtx_lock(&devsoftc.mtx); STAILQ_INSERT_TAIL(&devsoftc.devq, dei, dei_link); devsoftc.queued++; cv_broadcast(&devsoftc.cv); KNOTE_LOCKED(&devsoftc.sel.si_note, 0); mtx_unlock(&devsoftc.mtx); selwakeup(&devsoftc.sel); if (devsoftc.async && devsoftc.sigio != NULL) pgsigio(&devsoftc.sigio, SIGIO, 0); } /** * @brief Send a 'notification' to userland, using standard ways */ void devctl_notify(const char *system, const char *subsystem, const char *type, const char *data) { struct dev_event_info *dei; struct sbuf sb; if (system == NULL || subsystem == NULL || type == NULL) return; dei = devctl_alloc_dei_sb(&sb); if (dei == NULL) return; sbuf_cpy(&sb, "!system="); sbuf_cat(&sb, system); sbuf_cat(&sb, " subsystem="); sbuf_cat(&sb, subsystem); sbuf_cat(&sb, " type="); sbuf_cat(&sb, type); if (data != NULL) { sbuf_putc(&sb, ' '); sbuf_cat(&sb, data); } sbuf_putc(&sb, '\n'); if (sbuf_finish(&sb) != 0) devctl_free_dei(dei); /* overflow -> drop it */ else devctl_queue(dei); } /* * Common routine that tries to make sending messages as easy as possible. * We allocate memory for the data, copy strings into that, but do not * free it unless there's an error. The dequeue part of the driver should * free the data. We don't send data when the device is disabled. We do * send data, even when we have no listeners, because we wish to avoid * races relating to startup and restart of listening applications. * * devaddq is designed to string together the type of event, with the * object of that event, plus the plug and play info and location info * for that event. This is likely most useful for devices, but less * useful for other consumers of this interface. Those should use * the devctl_notify() interface instead. * * Output: * ${type}${what} at $(location dev) $(pnp-info dev) on $(parent dev) */ static void devaddq(const char *type, const char *what, device_t dev) { struct dev_event_info *dei; const char *parstr; struct sbuf sb; dei = devctl_alloc_dei_sb(&sb); if (dei == NULL) return; sbuf_cpy(&sb, type); sbuf_cat(&sb, what); sbuf_cat(&sb, " at "); /* Add in the location */ - bus_child_location_sb(dev, &sb); + bus_child_location(dev, &sb); sbuf_putc(&sb, ' '); /* Add in pnpinfo */ - bus_child_pnpinfo_sb(dev, &sb); + bus_child_pnpinfo(dev, &sb); /* Get the parent of this device, or / if high enough in the tree. */ if (device_get_parent(dev) == NULL) parstr = "."; /* Or '/' ? */ else parstr = device_get_nameunit(device_get_parent(dev)); sbuf_cat(&sb, " on "); sbuf_cat(&sb, parstr); sbuf_putc(&sb, '\n'); if (sbuf_finish(&sb) != 0) goto bad; devctl_queue(dei); return; bad: devctl_free_dei(dei); } /* * A device was added to the tree. We are called just after it successfully * attaches (that is, probe and attach success for this device). No call * is made if a device is merely parented into the tree. See devnomatch * if probe fails. 
If attach fails, no notification is sent (but maybe * we should have a different message for this). */ static void devadded(device_t dev) { devaddq("+", device_get_nameunit(dev), dev); } /* * A device was removed from the tree. We are called just before this * happens. */ static void devremoved(device_t dev) { devaddq("-", device_get_nameunit(dev), dev); } /* * Called when there's no match for this device. This is only called * the first time that no match happens, so we don't keep getting this * message. Should that prove to be undesirable, we can change it. * This is called when all drivers that can attach to a given bus * decline to accept this device. Other errors may not be detected. */ static void devnomatch(device_t dev) { devaddq("?", "", dev); } static int sysctl_devctl_queue(SYSCTL_HANDLER_ARGS) { int q, error; q = devctl_queue_length; error = sysctl_handle_int(oidp, &q, 0, req); if (error || !req->newptr) return (error); if (q < 0) return (EINVAL); /* * When set as a tunable, we've not yet initialized the mutex. * It is safe to just assign to devctl_queue_length and return * as we're racing no one. We'll use whatever value set in * devinit. */ if (!mtx_initialized(&devsoftc.mtx)) { devctl_queue_length = q; return (0); } /* * XXX It's hard to grow or shrink the UMA zone. Only allow * disabling the queue size for the moment until underlying * UMA issues can be sorted out. */ if (q != 0) return (EINVAL); if (q == devctl_queue_length) return (0); mtx_lock(&devsoftc.mtx); devctl_queue_length = 0; uma_zdestroy(devsoftc.zone); devsoftc.zone = 0; mtx_unlock(&devsoftc.mtx); return (0); } /** * @brief safely quotes strings that might have double quotes in them. * * The devctl protocol relies on quoted strings having matching quotes. * This routine quotes any internal quotes so the resulting string * is safe to pass to snprintf to construct, for example pnp info strings. * * @param sb sbuf to place the characters into * @param src Original buffer. */ void devctl_safe_quote_sb(struct sbuf *sb, const char *src) { while (*src != '\0') { if (*src == '"' || *src == '\\') sbuf_putc(sb, '\\'); sbuf_putc(sb, *src++); } } /* End of /dev/devctl code */ static struct device_list bus_data_devices; static int bus_data_generation = 1; static kobj_method_t null_methods[] = { KOBJMETHOD_END }; DEFINE_CLASS(null, null_methods, 0); /* * Bus pass implementation */ static driver_list_t passes = TAILQ_HEAD_INITIALIZER(passes); int bus_current_pass = BUS_PASS_ROOT; /** * @internal * @brief Register the pass level of a new driver attachment * * Register a new driver attachment's pass level. If no driver * attachment with the same pass level has been added, then @p new * will be added to the global passes list. * * @param new the new driver attachment */ static void driver_register_pass(struct driverlink *new) { struct driverlink *dl; /* We only consider pass numbers during boot. */ if (bus_current_pass == BUS_PASS_DEFAULT) return; /* * Walk the passes list. If we already know about this pass * then there is nothing to do. If we don't, then insert this * driver link into the list. */ TAILQ_FOREACH(dl, &passes, passlink) { if (dl->pass < new->pass) continue; if (dl->pass == new->pass) return; TAILQ_INSERT_BEFORE(dl, new, passlink); return; } TAILQ_INSERT_TAIL(&passes, new, passlink); } /** * @brief Raise the current bus pass * * Raise the current bus pass level to @p pass. Call the BUS_NEW_PASS() * method on the root bus to kick off a new device tree scan for each * new pass level that has at least one driver. 
*/ void bus_set_pass(int pass) { struct driverlink *dl; if (bus_current_pass > pass) panic("Attempt to lower bus pass level"); TAILQ_FOREACH(dl, &passes, passlink) { /* Skip pass values below the current pass level. */ if (dl->pass <= bus_current_pass) continue; /* * Bail once we hit a driver with a pass level that is * too high. */ if (dl->pass > pass) break; /* * Raise the pass level to the next level and rescan * the tree. */ bus_current_pass = dl->pass; BUS_NEW_PASS(root_bus); } /* * If there isn't a driver registered for the requested pass, * then bus_current_pass might still be less than 'pass'. Set * it to 'pass' in that case. */ if (bus_current_pass < pass) bus_current_pass = pass; KASSERT(bus_current_pass == pass, ("Failed to update bus pass level")); } /* * Devclass implementation */ static devclass_list_t devclasses = TAILQ_HEAD_INITIALIZER(devclasses); /** * @internal * @brief Find or create a device class * * If a device class with the name @p classname exists, return it, * otherwise if @p create is non-zero create and return a new device * class. * * If @p parentname is non-NULL, the parent of the devclass is set to * the devclass of that name. * * @param classname the devclass name to find or create * @param parentname the parent devclass name or @c NULL * @param create non-zero to create a devclass */ static devclass_t devclass_find_internal(const char *classname, const char *parentname, int create) { devclass_t dc; PDEBUG(("looking for %s", classname)); if (!classname) return (NULL); TAILQ_FOREACH(dc, &devclasses, link) { if (!strcmp(dc->name, classname)) break; } if (create && !dc) { PDEBUG(("creating %s", classname)); dc = malloc(sizeof(struct devclass) + strlen(classname) + 1, M_BUS, M_NOWAIT | M_ZERO); if (!dc) return (NULL); dc->parent = NULL; dc->name = (char*) (dc + 1); strcpy(dc->name, classname); TAILQ_INIT(&dc->drivers); TAILQ_INSERT_TAIL(&devclasses, dc, link); bus_data_generation_update(); } /* * If a parent class is specified, then set that as our parent so * that this devclass will support drivers for the parent class as * well. If the parent class has the same name don't do this though * as it creates a cycle that can trigger an infinite loop in * device_probe_child() if a device exists for which there is no * suitable driver. */ if (parentname && dc && !dc->parent && strcmp(classname, parentname) != 0) { dc->parent = devclass_find_internal(parentname, NULL, TRUE); dc->parent->flags |= DC_HAS_CHILDREN; } return (dc); } /** * @brief Create a device class * * If a device class with the name @p classname exists, return it, * otherwise create and return a new device class. * * @param classname the devclass name to find or create */ devclass_t devclass_create(const char *classname) { return (devclass_find_internal(classname, NULL, TRUE)); } /** * @brief Find a device class * * If a device class with the name @p classname exists, return it, * otherwise return @c NULL. * * @param classname the devclass name to find */ devclass_t devclass_find(const char *classname) { return (devclass_find_internal(classname, NULL, FALSE)); } /** * @brief Register that a device driver has been added to a devclass * * Register that a device driver has been added to a devclass. This * is called by devclass_add_driver to accomplish the recursive * notification of all the children classes of dc, as well as dc. * Each layer will have BUS_DRIVER_ADDED() called for all instances of * the devclass. 
* * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... * * @param dc the devclass to edit * @param driver the driver that was just added */ static void devclass_driver_added(devclass_t dc, driver_t *driver) { devclass_t parent; int i; /* * Call BUS_DRIVER_ADDED for any existing buses in this class. */ for (i = 0; i < dc->maxunit; i++) if (dc->devices[i] && device_is_attached(dc->devices[i])) BUS_DRIVER_ADDED(dc->devices[i], driver); /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(dc->flags & DC_HAS_CHILDREN)) return; parent = dc; TAILQ_FOREACH(dc, &devclasses, link) { if (dc->parent == parent) devclass_driver_added(dc, driver); } } /** * @brief Add a device driver to a device class * * Add a device driver to a devclass. This is normally called * automatically by DRIVER_MODULE(). The BUS_DRIVER_ADDED() method of * all devices in the devclass will be called to allow them to attempt * to re-probe any unmatched children. * * @param dc the devclass to edit * @param driver the driver to register */ int devclass_add_driver(devclass_t dc, driver_t *driver, int pass, devclass_t *dcp) { driverlink_t dl; const char *parentname; PDEBUG(("%s", DRIVERNAME(driver))); /* Don't allow invalid pass values. */ if (pass <= BUS_PASS_ROOT) return (EINVAL); dl = malloc(sizeof *dl, M_BUS, M_NOWAIT|M_ZERO); if (!dl) return (ENOMEM); /* * Compile the driver's methods. Also increase the reference count * so that the class doesn't get freed when the last instance * goes. This means we can safely use static methods and avoids a * double-free in devclass_delete_driver. */ kobj_class_compile((kobj_class_t) driver); /* * If the driver has any base classes, make the * devclass inherit from the devclass of the driver's * first base class. This will allow the system to * search for drivers in both devclasses for children * of a device using this driver. */ if (driver->baseclasses) parentname = driver->baseclasses[0]->name; else parentname = NULL; *dcp = devclass_find_internal(driver->name, parentname, TRUE); dl->driver = driver; TAILQ_INSERT_TAIL(&dc->drivers, dl, link); driver->refs++; /* XXX: kobj_mtx */ dl->pass = pass; driver_register_pass(dl); if (device_frozen) { dl->flags |= DL_DEFERRED_PROBE; } else { devclass_driver_added(dc, driver); } bus_data_generation_update(); return (0); } /** * @brief Register that a device driver has been deleted from a devclass * * Register that a device driver has been removed from a devclass. * This is called by devclass_delete_driver to accomplish the * recursive notification of all the children classes of busclass, as * well as busclass. Each layer will attempt to detach the driver * from any devices that are children of the bus's devclass. The function * will return an error if a device fails to detach. * * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... 
* * @param busclass the devclass of the parent bus * @param dc the devclass of the driver being deleted * @param driver the driver being deleted */ static int devclass_driver_deleted(devclass_t busclass, devclass_t dc, driver_t *driver) { devclass_t parent; device_t dev; int error, i; /* * Disassociate from any devices. We iterate through all the * devices in the devclass of the driver and detach any which are * using the driver and which have a parent in the devclass which * we are deleting from. * * Note that since a driver can be in multiple devclasses, we * should not detach devices which are not children of devices in * the affected devclass. * * If we're frozen, we don't generate NOMATCH events. Mark to * generate later. */ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_detach(dev)) != 0) return (error); if (device_frozen) { dev->flags &= ~DF_DONENOMATCH; dev->flags |= DF_NEEDNOMATCH; } else { BUS_PROBE_NOMATCH(dev->parent, dev); devnomatch(dev); dev->flags |= DF_DONENOMATCH; } } } } /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(busclass->flags & DC_HAS_CHILDREN)) return (0); parent = busclass; TAILQ_FOREACH(busclass, &devclasses, link) { if (busclass->parent == parent) { error = devclass_driver_deleted(busclass, dc, driver); if (error) return (error); } } return (0); } /** * @brief Delete a device driver from a device class * * Delete a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_delete_driver() will first attempt to detach from each * device. If one of the detach calls fails, the driver will not be * deleted. * * @param busclass the devclass to edit * @param driver the driver to unregister */ int devclass_delete_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. */ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } error = devclass_driver_deleted(busclass, dc, driver); if (error != 0) return (error); TAILQ_REMOVE(&busclass->drivers, dl, link); free(dl, M_BUS); /* XXX: kobj_mtx */ driver->refs--; if (driver->refs == 0) kobj_class_free((kobj_class_t) driver); bus_data_generation_update(); return (0); } /** * @brief Quiesce a device driver from a device class * * Quiesce a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_quiesce_driver() will first attempt to quiesce each * device.
* * @param dc the devclass to edit * @param driver the driver to unregister */ static int devclass_quiesce_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; device_t dev; int i; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. */ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } /* * Quiesce all devices. We iterate through all the devices in * the devclass of the driver and quiesce any which are using * the driver and which have a parent in the devclass which we * are quiescing. * * Note that since a driver can be in multiple devclasses, we * should not quiesce devices which are not children of * devices in the affected devclass. */ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_quiesce(dev)) != 0) return (error); } } } return (0); } /** * @internal */ static driverlink_t devclass_find_driver_internal(devclass_t dc, const char *classname) { driverlink_t dl; PDEBUG(("%s in devclass %s", classname, DEVCLANAME(dc))); TAILQ_FOREACH(dl, &dc->drivers, link) { if (!strcmp(dl->driver->name, classname)) return (dl); } PDEBUG(("not found")); return (NULL); } /** * @brief Return the name of the devclass */ const char * devclass_get_name(devclass_t dc) { return (dc->name); } /** * @brief Find a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t devclass_get_device(devclass_t dc, int unit) { if (dc == NULL || unit < 0 || unit >= dc->maxunit) return (NULL); return (dc->devices[unit]); } /** * @brief Find the softc field of a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the softc field of the device with the given * unit number or @c NULL if there is no such * device */ void * devclass_get_softc(devclass_t dc, int unit) { device_t dev; dev = devclass_get_device(dc, unit); if (!dev) return (NULL); return (device_get_softc(dev)); } /** * @brief Get a list of devices in the devclass * * An array containing a list of all the devices in the given devclass * is allocated and returned in @p *devlistp. The number of devices * in the array is returned in @p *devcountp. The caller should free * the array using @c free(p, M_TEMP), even if @p *devcountp is 0. 
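 *
 * A minimal usage sketch (the "foo" devclass name is hypothetical and
 * error handling is elided):
 *
 *	devclass_t dc;
 *	device_t *devs;
 *	int count, i;
 *
 *	dc = devclass_find("foo");
 *	if (dc != NULL && devclass_get_devices(dc, &devs, &count) == 0) {
 *		for (i = 0; i < count; i++)
 *			device_printf(devs[i], "unit %d\n",
 *			    device_get_unit(devs[i]));
 *		free(devs, M_TEMP);
 *	}
 *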
* * @param dc the devclass to examine * @param devlistp points at location for array pointer return * value * @param devcountp points at location for array size return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_devices(devclass_t dc, device_t **devlistp, int *devcountp) { int count, i; device_t *list; count = devclass_get_count(dc); list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO); if (!list) return (ENOMEM); count = 0; for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { list[count] = dc->devices[i]; count++; } } *devlistp = list; *devcountp = count; return (0); } /** * @brief Get a list of drivers in the devclass * * An array containing a list of pointers to all the drivers in the * given devclass is allocated and returned in @p *listp. The number * of drivers in the array is returned in @p *countp. The caller should * free the array using @c free(p, M_TEMP). * * @param dc the devclass to examine * @param listp gives location for array pointer return value * @param countp gives location for number of array elements * return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp) { driverlink_t dl; driver_t **list; int count; count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) count++; list = malloc(count * sizeof(driver_t *), M_TEMP, M_NOWAIT); if (list == NULL) return (ENOMEM); count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) { list[count] = dl->driver; count++; } *listp = list; *countp = count; return (0); } /** * @brief Get the number of devices in a devclass * * @param dc the devclass to examine */ int devclass_get_count(devclass_t dc) { int count, i; count = 0; for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) count++; return (count); } /** * @brief Get the maximum unit number used in a devclass * * Note that this is one greater than the highest currently-allocated * unit. If a null devclass_t is passed in, -1 is returned to indicate * that not even the devclass has been allocated yet. * * @param dc the devclass to examine */ int devclass_get_maxunit(devclass_t dc) { if (dc == NULL) return (-1); return (dc->maxunit); } /** * @brief Find a free unit number in a devclass * * This function searches for the first unused unit number greater * than or equal to @p unit. * * @param dc the devclass to examine * @param unit the first unit number to check */ int devclass_find_free_unit(devclass_t dc, int unit) { if (dc == NULL) return (unit); while (unit < dc->maxunit && dc->devices[unit] != NULL) unit++; return (unit); } /** * @brief Set the parent of a devclass * * The parent class is normally initialised automatically by * DRIVER_MODULE(). * * @param dc the devclass to edit * @param pdc the new parent devclass */ void devclass_set_parent(devclass_t dc, devclass_t pdc) { dc->parent = pdc; } /** * @brief Get the parent of a devclass * * @param dc the devclass to examine */ devclass_t devclass_get_parent(devclass_t dc) { return (dc->parent); } struct sysctl_ctx_list * devclass_get_sysctl_ctx(devclass_t dc) { return (&dc->sysctl_ctx); } struct sysctl_oid * devclass_get_sysctl_tree(devclass_t dc) { return (dc->sysctl_tree); } /** * @internal * @brief Allocate a unit number * * On entry, @p *unitp is the desired unit number (or @c -1 if any * will do). The allocated unit number is returned in @p *unitp.
* @param dc the devclass to allocate from * @param unitp points at the location for the allocated unit * number * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure */ static int devclass_alloc_unit(devclass_t dc, device_t dev, int *unitp) { const char *s; int unit = *unitp; PDEBUG(("unit %d in devclass %s", unit, DEVCLANAME(dc))); /* Ask the parent bus if it wants to wire this device. */ if (unit == -1) BUS_HINT_DEVICE_UNIT(device_get_parent(dev), dev, dc->name, &unit); /* If we were given a wired unit number, check for existing device */ /* XXX imp XXX */ if (unit != -1) { if (unit >= 0 && unit < dc->maxunit && dc->devices[unit] != NULL) { if (bootverbose) printf("%s: %s%d already exists; skipping it\n", dc->name, dc->name, *unitp); return (EEXIST); } } else { /* Unwired device, find the next available slot for it */ unit = 0; for (unit = 0;; unit++) { /* If there is an "at" hint for a unit then skip it. */ if (resource_string_value(dc->name, unit, "at", &s) == 0) continue; /* If this device slot is already in use, skip it. */ if (unit < dc->maxunit && dc->devices[unit] != NULL) continue; break; } } /* * We've selected a unit beyond the length of the table, so let's * extend the table to make room for all units up to and including * this one. */ if (unit >= dc->maxunit) { device_t *newlist, *oldlist; int newsize; oldlist = dc->devices; newsize = roundup((unit + 1), MAX(1, MINALLOCSIZE / sizeof(device_t))); newlist = malloc(sizeof(device_t) * newsize, M_BUS, M_NOWAIT); if (!newlist) return (ENOMEM); if (oldlist != NULL) bcopy(oldlist, newlist, sizeof(device_t) * dc->maxunit); bzero(newlist + dc->maxunit, sizeof(device_t) * (newsize - dc->maxunit)); dc->devices = newlist; dc->maxunit = newsize; if (oldlist != NULL) free(oldlist, M_BUS); } PDEBUG(("now: unit %d in devclass %s", unit, DEVCLANAME(dc))); *unitp = unit; return (0); } /** * @internal * @brief Add a device to a devclass * * A unit number is allocated for the device (using the device's * preferred unit number if any) and the device is registered in the * devclass. This allows the device to be looked up by its unit * number, e.g. by decoding a dev_t minor number. * * @param dc the devclass to add to * @param dev the device to add * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure */ static int devclass_add_device(devclass_t dc, device_t dev) { int buflen, error; PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); buflen = snprintf(NULL, 0, "%s%d$", dc->name, INT_MAX); if (buflen < 0) return (ENOMEM); dev->nameunit = malloc(buflen, M_BUS, M_NOWAIT|M_ZERO); if (!dev->nameunit) return (ENOMEM); if ((error = devclass_alloc_unit(dc, dev, &dev->unit)) != 0) { free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (error); } dc->devices[dev->unit] = dev; dev->devclass = dc; snprintf(dev->nameunit, buflen, "%s%d", dc->name, dev->unit); return (0); } /** * @internal * @brief Delete a device from a devclass * * The device is removed from the devclass's device list and its unit * number is freed. 
* @param dc the devclass to delete from * @param dev the device to delete * * @retval 0 success */ static int devclass_delete_device(devclass_t dc, device_t dev) { if (!dc || !dev) return (0); PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); if (dev->devclass != dc || dc->devices[dev->unit] != dev) panic("devclass_delete_device: inconsistent device class"); dc->devices[dev->unit] = NULL; if (dev->flags & DF_WILDCARD) dev->unit = -1; dev->devclass = NULL; free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (0); } /** * @internal * @brief Make a new device and add it as a child of @p parent * * @param parent the parent of the new device * @param name the devclass name of the new device or @c NULL * to leave the devclass unspecified * @param unit the unit number of the new device or @c -1 to * leave the unit number unspecified * * @returns the new device */ static device_t make_device(device_t parent, const char *name, int unit) { device_t dev; devclass_t dc; PDEBUG(("%s at %s as unit %d", name, DEVICENAME(parent), unit)); if (name) { dc = devclass_find_internal(name, NULL, TRUE); if (!dc) { printf("make_device: can't find device class %s\n", name); return (NULL); } } else { dc = NULL; } dev = malloc(sizeof(*dev), M_BUS, M_NOWAIT|M_ZERO); if (!dev) return (NULL); dev->parent = parent; TAILQ_INIT(&dev->children); kobj_init((kobj_t) dev, &null_class); dev->driver = NULL; dev->devclass = NULL; dev->unit = unit; dev->nameunit = NULL; dev->desc = NULL; dev->busy = 0; dev->devflags = 0; dev->flags = DF_ENABLED; dev->order = 0; if (unit == -1) dev->flags |= DF_WILDCARD; if (name) { dev->flags |= DF_FIXEDCLASS; if (devclass_add_device(dc, dev)) { kobj_delete((kobj_t) dev, M_BUS); return (NULL); } } if (parent != NULL && device_has_quiet_children(parent)) dev->flags |= DF_QUIET | DF_QUIET_CHILDREN; dev->ivars = NULL; dev->softc = NULL; dev->state = DS_NOTPRESENT; TAILQ_INSERT_TAIL(&bus_data_devices, dev, devlink); bus_data_generation_update(); return (dev); } /** * @internal * @brief Print a description of a device. */ static int device_print_child(device_t dev, device_t child) { int retval = 0; if (device_is_alive(child)) retval += BUS_PRINT_CHILD(dev, child); else retval += device_printf(child, " not found\n"); return (retval); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with order zero. * * @param dev the device which will be the parent of the * new child device * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c -1 if not * specified * * @returns the new device */ device_t device_add_child(device_t dev, const char *name, int unit) { return (device_add_child_ordered(dev, 0, name, unit)); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with the same order.
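 *
 * For example, a bus attach or identify routine might add a fixed
 * child ahead of any later discoveries (sketch; the "acmeuart" name is
 * hypothetical):
 *
 *	device_t child;
 *
 *	child = device_add_child_ordered(dev, 0, "acmeuart", -1);
 *	if (child == NULL)
 *		return (ENXIO);
 *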
* * @param dev the device which will be the parent of the * new child device * @param order a value which is used to partially sort the * children of @p dev - devices created using * lower values of @p order appear first in @p * dev's list of children * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c -1 if not * specified * * @returns the new device */ device_t device_add_child_ordered(device_t dev, u_int order, const char *name, int unit) { device_t child; device_t place; PDEBUG(("%s at %s with order %u as unit %d", name, DEVICENAME(dev), order, unit)); KASSERT(name != NULL || unit == -1, ("child device with wildcard name and specific unit number")); child = make_device(dev, name, unit); if (child == NULL) return (child); child->order = order; TAILQ_FOREACH(place, &dev->children, link) { if (place->order > order) break; } if (place) { /* * The device 'place' is the first device whose order is * greater than the new child's. */ TAILQ_INSERT_BEFORE(place, child, link); } else { /* * The new child's order is greater than or equal to the order of * any existing device. Add the child to the tail of the list. */ TAILQ_INSERT_TAIL(&dev->children, child, link); } bus_data_generation_update(); return (child); } /** * @brief Delete a device * * This function deletes a device along with all of its children. If * the device currently has a driver attached to it, the device is * detached first using device_detach(). * * @param dev the parent device * @param child the device to delete * * @retval 0 success * @retval non-zero a unix error code describing the error */ int device_delete_child(device_t dev, device_t child) { int error; device_t grandchild; PDEBUG(("%s from %s", DEVICENAME(child), DEVICENAME(dev))); /* detach the device before deleting its children, if any */ if ((error = device_detach(child)) != 0) return (error); /* remove children second */ while ((grandchild = TAILQ_FIRST(&child->children)) != NULL) { error = device_delete_child(child, grandchild); if (error) return (error); } if (child->devclass) devclass_delete_device(child->devclass, child); if (child->parent) BUS_CHILD_DELETED(dev, child); TAILQ_REMOVE(&dev->children, child, link); TAILQ_REMOVE(&bus_data_devices, child, devlink); kobj_delete((kobj_t) child, M_BUS); bus_data_generation_update(); return (0); } /** * @brief Delete all children devices of the given device, if any. * * This function deletes all children devices of the given device, if * any, using the device_delete_child() function for each device it * finds. If a child device cannot be deleted, this function will * return an error code. * * @param dev the parent device * * @retval 0 success * @retval non-zero a device would not detach */ int device_delete_children(device_t dev) { device_t child; int error; PDEBUG(("Deleting all children of %s", DEVICENAME(dev))); error = 0; while ((child = TAILQ_FIRST(&dev->children)) != NULL) { error = device_delete_child(dev, child); if (error) { PDEBUG(("Failed deleting %s", DEVICENAME(child))); break; } } return (error); } /** * @brief Find a device given a unit number * * This is similar to devclass_get_devices() but only searches for * devices which have @p dev as a parent. * * @param dev the parent device to search * @param unit the unit number to search for. If the unit is -1, * return the first child of @p dev which has name * @p classname (that is, the one with the lowest unit.)
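 *
 * For instance, to look up the first "acmeuart" child of a bus
 * (sketch; the devclass name is hypothetical):
 *
 *	device_t child;
 *
 *	child = device_find_child(dev, "acmeuart", -1);
 *	if (child != NULL)
 *		device_printf(child, "found\n");
 *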
* * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t device_find_child(device_t dev, const char *classname, int unit) { devclass_t dc; device_t child; dc = devclass_find(classname); if (!dc) return (NULL); if (unit != -1) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } else { for (unit = 0; unit < devclass_get_maxunit(dc); unit++) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } } return (NULL); } /** * @internal */ static driverlink_t first_matching_driver(devclass_t dc, device_t dev) { if (dev->devclass) return (devclass_find_driver_internal(dc, dev->devclass->name)); return (TAILQ_FIRST(&dc->drivers)); } /** * @internal */ static driverlink_t next_matching_driver(devclass_t dc, device_t dev, driverlink_t last) { if (dev->devclass) { driverlink_t dl; for (dl = TAILQ_NEXT(last, link); dl; dl = TAILQ_NEXT(dl, link)) if (!strcmp(dev->devclass->name, dl->driver->name)) return (dl); return (NULL); } return (TAILQ_NEXT(last, link)); } /** * @internal */ int device_probe_child(device_t dev, device_t child) { devclass_t dc; driverlink_t best = NULL; driverlink_t dl; int result, pri = 0; int hasclass = (child->devclass != NULL); GIANT_REQUIRED; dc = dev->devclass; if (!dc) panic("device_probe_child: parent device has no devclass"); /* * If the state is already probed, then return. However, don't * return if we can rebid this object. */ if (child->state == DS_ALIVE && (child->flags & DF_REBID) == 0) return (0); for (; dc; dc = dc->parent) { for (dl = first_matching_driver(dc, child); dl; dl = next_matching_driver(dc, child, dl)) { /* If this driver's pass is too high, then ignore it. */ if (dl->pass > bus_current_pass) continue; PDEBUG(("Trying %s", DRIVERNAME(dl->driver))); result = device_set_driver(child, dl->driver); if (result == ENOMEM) return (result); else if (result != 0) continue; if (!hasclass) { if (device_set_devclass(child, dl->driver->name) != 0) { char const * devname = device_get_name(child); if (devname == NULL) devname = "(unknown)"; printf("driver bug: Unable to set " "devclass (class: %s " "devname: %s)\n", dl->driver->name, devname); (void)device_set_driver(child, NULL); continue; } } /* Fetch any flags for the device before probing. */ resource_int_value(dl->driver->name, child->unit, "flags", &child->devflags); result = DEVICE_PROBE(child); /* Reset flags and devclass before the next probe. */ child->devflags = 0; if (!hasclass) (void)device_set_devclass(child, NULL); /* * If the driver returns SUCCESS, there can be * no higher match for this device. */ if (result == 0) { best = dl; pri = 0; break; } /* * Reset DF_QUIET in case this driver doesn't * end up as the best driver. */ device_verbose(child); /* * Probes that return BUS_PROBE_NOWILDCARD or lower * only match on devices whose driver was explicitly * specified. */ if (result <= BUS_PROBE_NOWILDCARD && !(child->flags & DF_FIXEDCLASS)) { result = ENXIO; } /* * The driver returned an error so it * certainly doesn't match. */ if (result > 0) { (void)device_set_driver(child, NULL); continue; } /* * A priority lower than SUCCESS, remember the * best matching driver. Initialise the value * of pri for the first match. */ if (best == NULL || result > pri) { best = dl; pri = result; continue; } } /* * If we have an unambiguous match in this devclass, * don't look in the parent. */ if (best && pri == 0) break; } /* * If we found a driver, change state and initialise the devclass. 
*/ /* XXX What happens if we rebid and got no best? */ if (best) { /* * If this device was attached, and we were asked to * rescan, and it is a different driver, then we have * to detach the old driver and reattach this new one. * Note, we don't have to check for DF_REBID here * because if the state is > DS_ALIVE, we know it must * be. * * This assumes that all DF_REBID drivers can have * their probe routine called at any time and that * they are idempotent as well as completely benign in * normal operations. * * We also have to make sure that the detach * succeeded, otherwise we fail the operation (or * maybe it should just fail silently? I'm torn). */ if (child->state > DS_ALIVE && best->driver != child->driver) if ((result = device_detach(dev)) != 0) return (result); /* Set the winning driver, devclass, and flags. */ if (!child->devclass) { result = device_set_devclass(child, best->driver->name); if (result != 0) return (result); } result = device_set_driver(child, best->driver); if (result != 0) return (result); resource_int_value(best->driver->name, child->unit, "flags", &child->devflags); if (pri < 0) { /* * A bit bogus. Call the probe method again to make * sure that we have the right description. */ DEVICE_PROBE(child); #if 0 child->flags |= DF_REBID; #endif } else child->flags &= ~DF_REBID; child->state = DS_ALIVE; bus_data_generation_update(); return (0); } return (ENXIO); } /** * @brief Return the parent of a device */ device_t device_get_parent(device_t dev) { return (dev->parent); } /** * @brief Get a list of children of a device * * An array containing a list of all the children of the given device * is allocated and returned in @p *devlistp. The number of devices * in the array is returned in @p *devcountp. The caller should free * the array using @c free(p, M_TEMP). * * @param dev the device to examine * @param devlistp points at location for array pointer return * value * @param devcountp points at location for array size return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int device_get_children(device_t dev, device_t **devlistp, int *devcountp) { int count; device_t child; device_t *list; count = 0; TAILQ_FOREACH(child, &dev->children, link) { count++; } if (count == 0) { *devlistp = NULL; *devcountp = 0; return (0); } list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO); if (!list) return (ENOMEM); count = 0; TAILQ_FOREACH(child, &dev->children, link) { list[count] = child; count++; } *devlistp = list; *devcountp = count; return (0); } /** * @brief Return the current driver for the device or @c NULL if there * is no driver currently attached */ driver_t * device_get_driver(device_t dev) { return (dev->driver); } /** * @brief Return the current devclass for the device or @c NULL if * there is none. */ devclass_t device_get_devclass(device_t dev) { return (dev->devclass); } /** * @brief Return the name of the device's devclass or @c NULL if there * is none. */ const char * device_get_name(device_t dev) { if (dev != NULL && dev->devclass) return (devclass_get_name(dev->devclass)); return (NULL); } /** * @brief Return a string containing the device's devclass name * followed by an ascii representation of the device's unit number * (e.g. @c "foo2"). */ const char * device_get_nameunit(device_t dev) { return (dev->nameunit); } /** * @brief Return the device's unit number. 
*/ int device_get_unit(device_t dev) { return (dev->unit); } /** * @brief Return the device's description string */ const char * device_get_desc(device_t dev) { return (dev->desc); } /** * @brief Return the device's flags */ uint32_t device_get_flags(device_t dev) { return (dev->devflags); } struct sysctl_ctx_list * device_get_sysctl_ctx(device_t dev) { return (&dev->sysctl_ctx); } struct sysctl_oid * device_get_sysctl_tree(device_t dev) { return (dev->sysctl_tree); } /** * @brief Print the name of the device followed by a colon and a space * * @returns the number of characters printed */ int device_print_prettyname(device_t dev) { const char *name = device_get_name(dev); if (name == NULL) return (printf("unknown: ")); return (printf("%s%d: ", name, device_get_unit(dev))); } /** * @brief Print the name of the device followed by a colon, a space * and the result of calling vprintf() with the value of @p fmt and * the following arguments. * * @returns the number of characters printed */ int device_printf(device_t dev, const char * fmt, ...) { char buf[128]; struct sbuf sb; const char *name; va_list ap; size_t retval; retval = 0; sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); sbuf_set_drain(&sb, sbuf_printf_drain, &retval); name = device_get_name(dev); if (name == NULL) sbuf_cat(&sb, "unknown: "); else sbuf_printf(&sb, "%s%d: ", name, device_get_unit(dev)); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); sbuf_delete(&sb); return (retval); } /** * @brief Print the name of the device followed by a colon, a space * and the result of calling log() with the value of @p fmt and * the following arguments. * * @returns the number of characters printed */ int device_log(device_t dev, int pri, const char * fmt, ...) { char buf[128]; struct sbuf sb; const char *name; va_list ap; size_t retval; retval = 0; sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); name = device_get_name(dev); if (name == NULL) sbuf_cat(&sb, "unknown: "); else sbuf_printf(&sb, "%s%d: ", name, device_get_unit(dev)); va_start(ap, fmt); sbuf_vprintf(&sb, fmt, ap); va_end(ap); sbuf_finish(&sb); log(pri, "%.*s", (int) sbuf_len(&sb), sbuf_data(&sb)); retval = sbuf_len(&sb); sbuf_delete(&sb); return (retval); } /** * @internal */ static void device_set_desc_internal(device_t dev, const char* desc, int copy) { if (dev->desc && (dev->flags & DF_DESCMALLOCED)) { free(dev->desc, M_BUS); dev->flags &= ~DF_DESCMALLOCED; dev->desc = NULL; } if (copy && desc) { dev->desc = malloc(strlen(desc) + 1, M_BUS, M_NOWAIT); if (dev->desc) { strcpy(dev->desc, desc); dev->flags |= DF_DESCMALLOCED; } } else { /* Avoid a -Wcast-qual warning */ dev->desc = (char *)(uintptr_t) desc; } bus_data_generation_update(); } /** * @brief Set the device's description * * The value of @c desc should be a string constant that will not * change (at least until the description is changed in a subsequent * call to device_set_desc() or device_set_desc_copy()). */ void device_set_desc(device_t dev, const char* desc) { device_set_desc_internal(dev, desc, FALSE); } /** * @brief Set the device's description * * The string pointed to by @c desc is copied. Use this function if * the device description is generated (e.g. with sprintf()).
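 *
 * For example (sketch; "rev" is a hypothetical local variable):
 *
 *	char desc[64];
 *
 *	snprintf(desc, sizeof(desc), "ACME controller rev %d", rev);
 *	device_set_desc_copy(dev, desc);
 *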
*/ void device_set_desc_copy(device_t dev, const char* desc) { device_set_desc_internal(dev, desc, TRUE); } /** * @brief Set the device's flags */ void device_set_flags(device_t dev, uint32_t flags) { dev->devflags = flags; } /** * @brief Return the device's softc field * * The softc is allocated and zeroed when a driver is attached, based * on the size field of the driver. */ void * device_get_softc(device_t dev) { return (dev->softc); } /** * @brief Set the device's softc field * * Most drivers do not need to use this since the softc is allocated * automatically when the driver is attached. */ void device_set_softc(device_t dev, void *softc) { if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) free(dev->softc, M_BUS_SC); dev->softc = softc; if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Free claimed softc * * Most drivers do not need to use this since the softc is freed * automatically when the driver is detached. */ void device_free_softc(void *softc) { free(softc, M_BUS_SC); } /** * @brief Claim softc * * This function can be used to let the driver free the automatically * allocated softc using "device_free_softc()". This function is * useful when the driver is refcounting the softc and the softc * cannot be freed when the "device_detach" method is called. */ void device_claim_softc(device_t dev) { if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Get the device's ivars field * * The ivars field is used by the parent device to store per-device * state (e.g. the physical location of the device or a list of * resources). */ void * device_get_ivars(device_t dev) { KASSERT(dev != NULL, ("device_get_ivars(NULL, ...)")); return (dev->ivars); } /** * @brief Set the device's ivars field */ void device_set_ivars(device_t dev, void * ivars) { KASSERT(dev != NULL, ("device_set_ivars(NULL, ...)")); dev->ivars = ivars; } /** * @brief Return the device's state */ device_state_t device_get_state(device_t dev) { return (dev->state); } /** * @brief Set the DF_ENABLED flag for the device */ void device_enable(device_t dev) { dev->flags |= DF_ENABLED; } /** * @brief Clear the DF_ENABLED flag for the device */ void device_disable(device_t dev) { dev->flags &= ~DF_ENABLED; } /** * @brief Increment the busy counter for the device */ void device_busy(device_t dev) { if (dev->state < DS_ATTACHING) panic("device_busy: called for unattached device"); if (dev->busy == 0 && dev->parent) device_busy(dev->parent); dev->busy++; if (dev->state == DS_ATTACHED) dev->state = DS_BUSY; } /** * @brief Decrement the busy counter for the device */ void device_unbusy(device_t dev) { if (dev->busy != 0 && dev->state != DS_BUSY && dev->state != DS_ATTACHING) panic("device_unbusy: called for non-busy device %s", device_get_nameunit(dev)); dev->busy--; if (dev->busy == 0) { if (dev->parent) device_unbusy(dev->parent); if (dev->state == DS_BUSY) dev->state = DS_ATTACHED; } } /** * @brief Set the DF_QUIET flag for the device */ void device_quiet(device_t dev) { dev->flags |= DF_QUIET; } /** * @brief Set the DF_QUIET_CHILDREN flag for the device */ void device_quiet_children(device_t dev) { dev->flags |= DF_QUIET_CHILDREN; } /** * @brief Clear the DF_QUIET flag for the device */ void device_verbose(device_t dev) { dev->flags &= ~DF_QUIET; } /** * @brief Return non-zero if the DF_QUIET_CHILDREN flag is set on the device */ int device_has_quiet_children(device_t dev) { return ((dev->flags & DF_QUIET_CHILDREN) != 0); } /** * @brief
Return non-zero if the DF_QUIET flag is set on the device */ int device_is_quiet(device_t dev) { return ((dev->flags & DF_QUIET) != 0); } /** * @brief Return non-zero if the DF_ENABLED flag is set on the device */ int device_is_enabled(device_t dev) { return ((dev->flags & DF_ENABLED) != 0); } /** * @brief Return non-zero if the device was successfully probed */ int device_is_alive(device_t dev) { return (dev->state >= DS_ALIVE); } /** * @brief Return non-zero if the device currently has a driver * attached to it */ int device_is_attached(device_t dev) { return (dev->state >= DS_ATTACHED); } /** * @brief Return non-zero if the device is currently suspended. */ int device_is_suspended(device_t dev) { return ((dev->flags & DF_SUSPENDED) != 0); } /** * @brief Set the devclass of a device * @see devclass_add_device(). */ int device_set_devclass(device_t dev, const char *classname) { devclass_t dc; int error; if (!classname) { if (dev->devclass) devclass_delete_device(dev->devclass, dev); return (0); } if (dev->devclass) { printf("device_set_devclass: device class already set\n"); return (EINVAL); } dc = devclass_find_internal(classname, NULL, TRUE); if (!dc) return (ENOMEM); error = devclass_add_device(dc, dev); bus_data_generation_update(); return (error); } /** * @brief Set the devclass of a device and mark the devclass fixed. * @see device_set_devclass() */ int device_set_devclass_fixed(device_t dev, const char *classname) { int error; if (classname == NULL) return (EINVAL); error = device_set_devclass(dev, classname); if (error) return (error); dev->flags |= DF_FIXEDCLASS; return (0); } /** * @brief Query the device to determine if it's of a fixed devclass * @see device_set_devclass_fixed() */ bool device_is_devclass_fixed(device_t dev) { return ((dev->flags & DF_FIXEDCLASS) != 0); } /** * @brief Set the driver of a device * * @retval 0 success * @retval EBUSY the device already has a driver attached * @retval ENOMEM a memory allocation failure occurred */ int device_set_driver(device_t dev, driver_t *driver) { int domain; struct domainset *policy; if (dev->state >= DS_ATTACHED) return (EBUSY); if (dev->driver == driver) return (0); if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) { free(dev->softc, M_BUS_SC); dev->softc = NULL; } device_set_desc(dev, NULL); kobj_delete((kobj_t) dev, NULL); dev->driver = driver; if (driver) { kobj_init((kobj_t) dev, (kobj_class_t) driver); if (!(dev->flags & DF_EXTERNALSOFTC) && driver->size > 0) { if (bus_get_domain(dev, &domain) == 0) policy = DOMAINSET_PREF(domain); else policy = DOMAINSET_RR(); dev->softc = malloc_domainset(driver->size, M_BUS_SC, policy, M_NOWAIT | M_ZERO); if (!dev->softc) { kobj_delete((kobj_t) dev, NULL); kobj_init((kobj_t) dev, &null_class); dev->driver = NULL; return (ENOMEM); } } } else { kobj_init((kobj_t) dev, &null_class); } bus_data_generation_update(); return (0); } /** * @brief Probe a device, and return this status. * * This function is the core of the device autoconfiguration * system. Its purpose is to select a suitable driver for a device and * then call that driver to initialise the hardware appropriately. The * driver is selected by calling the DEVICE_PROBE() method of a set of * candidate drivers and then choosing the driver which returned the * best value. This driver is then attached to the device using * device_attach(). * * The set of suitable drivers is taken from the list of drivers in * the parent device's devclass. 
If the device was originally created * with a specific class name (see device_add_child()), only drivers * with that name are probed, otherwise all drivers in the devclass * are probed. If no drivers return successful probe values in the * parent devclass, the search continues in the parent of that * devclass (see devclass_get_parent()) if any. * * @param dev the device to initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code * @retval -1 Device already attached */ int device_probe(device_t dev) { int error; GIANT_REQUIRED; if (dev->state >= DS_ALIVE && (dev->flags & DF_REBID) == 0) return (-1); if (!(dev->flags & DF_ENABLED)) { if (bootverbose && device_get_name(dev) != NULL) { device_print_prettyname(dev); printf("not probed (disabled)\n"); } return (-1); } if ((error = device_probe_child(dev->parent, dev)) != 0) { if (bus_current_pass == BUS_PASS_DEFAULT && !(dev->flags & DF_DONENOMATCH)) { BUS_PROBE_NOMATCH(dev->parent, dev); devnomatch(dev); dev->flags |= DF_DONENOMATCH; } return (error); } return (0); } /** * @brief Probe a device and attach a driver if possible * * calls device_probe() and attaches if that was successful. */ int device_probe_and_attach(device_t dev) { int error; GIANT_REQUIRED; error = device_probe(dev); if (error == -1) return (0); else if (error != 0) return (error); CURVNET_SET_QUIET(vnet0); error = device_attach(dev); CURVNET_RESTORE(); return error; } /** * @brief Attach a device driver to a device * * This function is a wrapper around the DEVICE_ATTACH() driver * method. In addition to calling DEVICE_ATTACH(), it initialises the * device's sysctl tree, optionally prints a description of the device * and queues a notification event for user-based device management * services. * * Normally this function is only called internally from * device_probe_and_attach(). * * @param dev the device to initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_attach(device_t dev) { uint64_t attachtime; uint16_t attachentropy; int error; if (resource_disabled(dev->driver->name, dev->unit)) { device_disable(dev); if (bootverbose) device_printf(dev, "disabled via hints entry\n"); return (ENXIO); } device_sysctl_init(dev); if (!device_is_quiet(dev)) device_print_child(dev->parent, dev); attachtime = get_cyclecount(); dev->state = DS_ATTACHING; if ((error = DEVICE_ATTACH(dev)) != 0) { printf("device_attach: %s%d attach returned %d\n", dev->driver->name, dev->unit, error); if (!(dev->flags & DF_FIXEDCLASS)) devclass_delete_device(dev->devclass, dev); (void)device_set_driver(dev, NULL); device_sysctl_fini(dev); KASSERT(dev->busy == 0, ("attach failed but busy")); dev->state = DS_NOTPRESENT; return (error); } dev->flags |= DF_ATTACHED_ONCE; /* We only need the low bits of this time, but ranges from tens to thousands * have been seen, so keep 2 bytes' worth. */ attachentropy = (uint16_t)(get_cyclecount() - attachtime); random_harvest_direct(&attachentropy, sizeof(attachentropy), RANDOM_ATTACH); device_sysctl_update(dev); if (dev->busy) dev->state = DS_BUSY; else dev->state = DS_ATTACHED; dev->flags &= ~DF_DONENOMATCH; EVENTHANDLER_DIRECT_INVOKE(device_attach, dev); devadded(dev); return (0); } /** * @brief Detach a driver from a device * * This function is a wrapper around the DEVICE_DETACH() driver * method. 
If the call to DEVICE_DETACH() succeeds, it calls * BUS_CHILD_DETACHED() for the parent of @p dev, queues a * notification event for user-based device management services and * cleans up the device's sysctl tree. * * @param dev the device to un-initialise * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_detach(device_t dev) { int error; GIANT_REQUIRED; PDEBUG(("%s", DEVICENAME(dev))); if (dev->state == DS_BUSY) return (EBUSY); if (dev->state == DS_ATTACHING) { device_printf(dev, "device in attaching state! Deferring detach.\n"); return (EBUSY); } if (dev->state != DS_ATTACHED) return (0); EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_BEGIN); if ((error = DEVICE_DETACH(dev)) != 0) { EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_FAILED); return (error); } else { EVENTHANDLER_DIRECT_INVOKE(device_detach, dev, EVHDEV_DETACH_COMPLETE); } devremoved(dev); if (!device_is_quiet(dev)) device_printf(dev, "detached\n"); if (dev->parent) BUS_CHILD_DETACHED(dev->parent, dev); if (!(dev->flags & DF_FIXEDCLASS)) devclass_delete_device(dev->devclass, dev); device_verbose(dev); dev->state = DS_NOTPRESENT; (void)device_set_driver(dev, NULL); device_sysctl_fini(dev); return (0); } /** * @brief Tells a driver to quiesce itself. * * This function is a wrapper around the DEVICE_QUIESCE() driver * method. * * @param dev the device to quiesce * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_quiesce(device_t dev) { PDEBUG(("%s", DEVICENAME(dev))); if (dev->state == DS_BUSY) return (EBUSY); if (dev->state != DS_ATTACHED) return (0); return (DEVICE_QUIESCE(dev)); } /** * @brief Notify a device of system shutdown * * This function calls the DEVICE_SHUTDOWN() driver method if the * device currently has an attached driver. * * @returns the value returned by DEVICE_SHUTDOWN() */ int device_shutdown(device_t dev) { if (dev->state < DS_ATTACHED) return (0); return (DEVICE_SHUTDOWN(dev)); } /** * @brief Set the unit number of a device * * This function can be used to override the unit number used for a * device (e.g. to wire a device to a pre-configured unit number). */ int device_set_unit(device_t dev, int unit) { devclass_t dc; int err; dc = device_get_devclass(dev); if (unit < dc->maxunit && dc->devices[unit]) return (EBUSY); err = devclass_delete_device(dc, dev); if (err) return (err); dev->unit = unit; err = devclass_add_device(dc, dev); if (err) return (err); bus_data_generation_update(); return (0); } /*======================================*/ /* * Some useful method implementations to make life easier for bus drivers. */ void resource_init_map_request_impl(struct resource_map_request *args, size_t sz) { bzero(args, sz); args->size = sz; args->memattr = VM_MEMATTR_DEVICE; } /** * @brief Initialise a resource list. * * @param rl the resource list to initialise */ void resource_list_init(struct resource_list *rl) { STAILQ_INIT(rl); } /** * @brief Reclaim memory used by a resource list. * * This function frees the memory for all resource entries on the list * (if any).
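 *
 * A typical lifecycle sketch, assuming hypothetical per-child ivars
 * that embed a struct resource_list named rl: the list is initialised
 * when the child is created, populated during resource discovery and
 * freed when the child is deleted:
 *
 *	resource_list_init(&ivars->rl);
 *	resource_list_add(&ivars->rl, SYS_RES_IRQ, 0, 5, 5, 1);
 *	resource_list_free(&ivars->rl);
 *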
* * @param rl the resource list to free */ void resource_list_free(struct resource_list *rl) { struct resource_list_entry *rle; while ((rle = STAILQ_FIRST(rl)) != NULL) { if (rle->res) panic("resource_list_free: resource entry is busy"); STAILQ_REMOVE_HEAD(rl, link); free(rle, M_BUS); } } /** * @brief Add a resource entry. * * This function adds a resource entry using the given @p type, @p * start, @p end and @p count values. A rid value is chosen by * searching sequentially for the first unused rid starting at zero. * * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param start the start address of the resource * @param end the end address of the resource * @param count XXX end-start+1 */ int resource_list_add_next(struct resource_list *rl, int type, rman_res_t start, rman_res_t end, rman_res_t count) { int rid; rid = 0; while (resource_list_find(rl, type, rid) != NULL) rid++; resource_list_add(rl, type, rid, start, end, count); return (rid); } /** * @brief Add or modify a resource entry. * * If an existing entry exists with the same type and rid, it will be * modified using the given values of @p start, @p end and @p * count. If no entry exists, a new one will be created using the * given values. The resource list entry that matches is then returned. * * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * @param start the start address of the resource * @param end the end address of the resource * @param count XXX end-start+1 */ struct resource_list_entry * resource_list_add(struct resource_list *rl, int type, int rid, rman_res_t start, rman_res_t end, rman_res_t count) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (!rle) { rle = malloc(sizeof(struct resource_list_entry), M_BUS, M_NOWAIT); if (!rle) panic("resource_list_add: can't record entry"); STAILQ_INSERT_TAIL(rl, rle, link); rle->type = type; rle->rid = rid; rle->res = NULL; rle->flags = 0; } if (rle->res) panic("resource_list_add: resource entry is busy"); rle->start = start; rle->end = end; rle->count = count; return (rle); } /** * @brief Determine if a resource entry is busy. * * Returns true if a resource entry is busy meaning that it has an * associated resource that is not an unallocated "reserved" resource. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is busy, zero otherwise. */ int resource_list_busy(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle == NULL || rle->res == NULL) return (0); if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) { KASSERT(!(rman_get_flags(rle->res) & RF_ACTIVE), ("reserved resource is active")); return (0); } return (1); } /** * @brief Determine if a resource entry is reserved. * * Returns true if a resource entry is reserved meaning that it has an * associated "reserved" resource. The resource can either be * allocated or unallocated. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is reserved, zero otherwise. 
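 *
 * For example (sketch):
 *
 *	if (resource_list_reserved(rl, SYS_RES_MEMORY, rid))
 *		device_printf(bus, "memory rid %d is reserved\n", rid);
 *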
*/ int resource_list_reserved(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle != NULL && rle->flags & RLE_RESERVED) return (1); return (0); } /** * @brief Find a resource entry by type and rid. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns the resource entry pointer or NULL if there is no such * entry. */ struct resource_list_entry * resource_list_find(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type == type && rle->rid == rid) return (rle); } return (NULL); } /** * @brief Delete a resource entry. * * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier */ void resource_list_delete(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle = resource_list_find(rl, type, rid); if (rle) { if (rle->res != NULL) panic("resource_list_delete: resource has not been released"); STAILQ_REMOVE(rl, rle, resource_list_entry, link); free(rle, M_BUS); } } /** * @brief Allocate a reserved resource * * This can be used by buses to force the allocation of resources * that are always active in the system even if they are not allocated * by a driver (e.g. PCI BARs). This function is usually called when * adding a new child to the bus. The resource is allocated from the * parent bus when it is reserved. The resource list entry is marked * with RLE_RESERVED to note that it is a reserved resource. * * Subsequent attempts to allocate the resource with * resource_list_alloc() will succeed the first time and will set * RLE_ALLOCATED to note that it has been allocated. When a reserved * resource that has been allocated is released with * resource_list_release() the resource RLE_ALLOCATED is cleared, but * the actual resource remains allocated. The resource can be released to * the parent bus by calling resource_list_unreserve(). 
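 *
 * For instance, a PCI-like bus might reserve a child's memory range
 * when the child is added (sketch; the address range is hypothetical):
 *
 *	int rid = 0;
 *
 *	resource_list_reserve(rl, bus, child, SYS_RES_MEMORY, &rid,
 *	    0x10000000, 0x10000fff, 0x1000, 0);
 *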
* * @param rl the resource list to allocate from * @param bus the parent device of @p child * @param child the device for which the resource is being reserved * @param type the type of resource to allocate * @param rid a pointer to the resource identifier * @param start hint at the start of the resource range - pass * @c 0 for any start address * @param end hint at the end of the resource range - pass * @c ~0 for any end address * @param count hint at the size of range required - pass @c 1 * for any size * @param flags any extra flags to control the resource * allocation - see @c RF_XXX flags in * for details * * @returns the resource which was allocated or @c NULL if no * resource could be allocated */ struct resource * resource_list_reserve(struct resource_list *rl, device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); struct resource *r; if (passthrough) panic( "resource_list_reserve() should only be called for direct children"); if (flags & RF_ACTIVE) panic( "resource_list_reserve() should only reserve inactive resources"); r = resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags); if (r != NULL) { rle = resource_list_find(rl, type, *rid); rle->flags |= RLE_RESERVED; } return (r); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE() * * Implement BUS_ALLOC_RESOURCE() by looking up a resource from the list * and passing the allocation up to the parent of @p bus. This assumes * that the first entry of @c device_get_ivars(child) is a struct * resource_list. This also handles 'passthrough' allocations where a * child is a remote descendant of bus by passing the allocation up to * the parent of bus. * * Typically, a bus driver would store a list of child resources * somewhere in the child device's ivars (see device_get_ivars()) and * its implementation of BUS_ALLOC_RESOURCE() would find that list and * then call resource_list_alloc() to perform the allocation. 
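 *
 * A minimal implementation following that pattern might look like this
 * (sketch; "mybus" and its ivars layout are hypothetical):
 *
 *	static struct resource *
 *	mybus_alloc_resource(device_t bus, device_t child, int type,
 *	    int *rid, rman_res_t start, rman_res_t end, rman_res_t count,
 *	    u_int flags)
 *	{
 *		struct mybus_ivars *ivars = device_get_ivars(child);
 *
 *		return (resource_list_alloc(&ivars->rl, bus, child, type,
 *		    rid, start, end, count, flags));
 *	}
 *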
 *
 * @param rl		the resource list to allocate from
 * @param bus		the parent device of @p child
 * @param child	the device which is requesting an allocation
 * @param type		the type of resource to allocate
 * @param rid		a pointer to the resource identifier
 * @param start	hint at the start of the resource range - pass
 *			@c 0 for any start address
 * @param end		hint at the end of the resource range - pass
 *			@c ~0 for any end address
 * @param count	hint at the size of range required - pass @c 1
 *			for any size
 * @param flags	any extra flags to control the resource
 *			allocation - see @c RF_XXX flags in
 *			<sys/rman.h> for details
 *
 * @returns		the resource which was allocated or @c NULL if no
 *			resource could be allocated
 */
struct resource *
resource_list_alloc(struct resource_list *rl, device_t bus, device_t child,
    int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count,
    u_int flags)
{
	struct resource_list_entry *rle = NULL;
	int passthrough = (device_get_parent(child) != bus);
	int isdefault = RMAN_IS_DEFAULT_RANGE(start, end);

	if (passthrough) {
		return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child,
		    type, rid, start, end, count, flags));
	}

	rle = resource_list_find(rl, type, *rid);

	if (!rle)
		return (NULL);		/* no resource of that type/rid */

	if (rle->res) {
		if (rle->flags & RLE_RESERVED) {
			if (rle->flags & RLE_ALLOCATED)
				return (NULL);
			if ((flags & RF_ACTIVE) &&
			    bus_activate_resource(child, type, *rid,
				rle->res) != 0)
				return (NULL);
			rle->flags |= RLE_ALLOCATED;
			return (rle->res);
		}
		device_printf(bus,
		    "resource entry %#x type %d for child %s is busy\n",
		    *rid, type, device_get_nameunit(child));
		return (NULL);
	}

	if (isdefault) {
		start = rle->start;
		count = ulmax(count, rle->count);
		end = ulmax(rle->end, start + count - 1);
	}

	rle->res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child,
	    type, rid, start, end, count, flags);

	/*
	 * Record the new range.
	 */
	if (rle->res) {
		rle->start = rman_get_start(rle->res);
		rle->end = rman_get_end(rle->res);
		rle->count = count;
	}

	return (rle->res);
}

/**
 * @brief Helper function for implementing BUS_RELEASE_RESOURCE()
 *
 * Implement BUS_RELEASE_RESOURCE() using a resource list.  Normally
 * used with resource_list_alloc().
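 *
 * A matching BUS_RELEASE_RESOURCE() sketch for the hypothetical
 * @c foo_devinfo ivars layout shown above (editor's illustration):
 *
 * @code
 *	static int
 *	foo_release_resource(device_t bus, device_t child, int type,
 *	    int rid, struct resource *r)
 *	{
 *		struct foo_devinfo *dinfo = device_get_ivars(child);
 *
 *		return (resource_list_release(&dinfo->rl, bus, child, type,
 *		    rid, r));
 *	}
 * @endcode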
* * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device which is requesting a release * @param type the type of resource to release * @param rid the resource identifier * @param res the resource to release * * @retval 0 success * @retval non-zero a standard unix error code indicating what * error condition prevented the operation */ int resource_list_release(struct resource_list *rl, device_t bus, device_t child, int type, int rid, struct resource *res) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); int error; if (passthrough) { return (BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, res)); } rle = resource_list_find(rl, type, rid); if (!rle) panic("resource_list_release: can't find resource"); if (!rle->res) panic("resource_list_release: resource entry is not busy"); if (rle->flags & RLE_RESERVED) { if (rle->flags & RLE_ALLOCATED) { if (rman_get_flags(res) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, res); if (error) return (error); } rle->flags &= ~RLE_ALLOCATED; return (0); } return (EINVAL); } error = BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, res); if (error) return (error); rle->res = NULL; return (0); } /** * @brief Release all active resources of a given type * * Release all active resources of a specified type. This is intended * to be used to cleanup resources leaked by a driver after detach or * a failed attach. * * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device whose active resources are being released * @param type the type of resources to release * * @retval 0 success * @retval EBUSY at least one resource was active */ int resource_list_release_active(struct resource_list *rl, device_t bus, device_t child, int type) { struct resource_list_entry *rle; int error, retval; retval = 0; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->res == NULL) continue; if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) continue; retval = EBUSY; error = resource_list_release(rl, bus, child, type, rman_get_rid(rle->res), rle->res); if (error != 0) device_printf(bus, "Failed to release active resource: %d\n", error); } return (retval); } /** * @brief Fully release a reserved resource * * Fully releases a resource reserved via resource_list_reserve(). 
 *
 * @param rl		the resource list which was allocated from
 * @param bus		the parent device of @p child
 * @param child	the device whose reserved resource is being released
 * @param type		the type of resource to release
 * @param rid		the resource identifier
 *
 * @retval 0		success
 * @retval non-zero	a standard unix error code indicating what
 *			error condition prevented the operation
 */
int
resource_list_unreserve(struct resource_list *rl, device_t bus, device_t child,
    int type, int rid)
{
	struct resource_list_entry *rle = NULL;
	int passthrough = (device_get_parent(child) != bus);

	if (passthrough)
		panic(
"resource_list_unreserve() should only be called for direct children");

	rle = resource_list_find(rl, type, rid);

	if (!rle)
		panic("resource_list_unreserve: can't find resource");
	if (!(rle->flags & RLE_RESERVED))
		return (EINVAL);
	if (rle->flags & RLE_ALLOCATED)
		return (EBUSY);
	rle->flags &= ~RLE_RESERVED;
	return (resource_list_release(rl, bus, child, type, rid, rle->res));
}

/**
 * @brief Print a description of resources in a resource list
 *
 * Print all resources of a specified type, for use in BUS_PRINT_CHILD().
 * The name is printed if at least one resource of the given type is available.
 * The format is used to print resource start and end.
 *
 * @param rl		the resource list to print
 * @param name		the name of @p type, e.g. @c "memory"
 * @param type		the type of resource entry to print
 * @param format	printf(9) format string to print resource
 *			start and end values
 *
 * @returns		the number of characters printed
 */
int
resource_list_print_type(struct resource_list *rl, const char *name, int type,
    const char *format)
{
	struct resource_list_entry *rle;
	int printed, retval;

	printed = 0;
	retval = 0;
	/* Yes, this is kinda cheating */
	STAILQ_FOREACH(rle, rl, link) {
		if (rle->type == type) {
			if (printed == 0)
				retval += printf(" %s ", name);
			else
				retval += printf(",");
			printed++;
			retval += printf(format, rle->start);
			if (rle->count > 1) {
				retval += printf("-");
				retval += printf(format, rle->start +
				    rle->count - 1);
			}
		}
	}
	return (retval);
}

/**
 * @brief Releases all the resources in a list.
 *
 * @param rl		The resource list to purge.
 *
 * @returns		nothing
 */
void
resource_list_purge(struct resource_list *rl)
{
	struct resource_list_entry *rle;

	while ((rle = STAILQ_FIRST(rl)) != NULL) {
		if (rle->res)
			bus_release_resource(rman_get_device(rle->res),
			    rle->type, rle->rid, rle->res);
		STAILQ_REMOVE_HEAD(rl, link);
		free(rle, M_BUS);
	}
}

device_t
bus_generic_add_child(device_t dev, u_int order, const char *name, int unit)
{
	return (device_add_child_ordered(dev, order, name, unit));
}

/**
 * @brief Helper function for implementing DEVICE_PROBE()
 *
 * This function can be used to help implement the DEVICE_PROBE() for
 * a bus (i.e. a device which has other devices attached to it).  It
 * calls the DEVICE_IDENTIFY() method of each driver in the device's
 * devclass.
 */
int
bus_generic_probe(device_t dev)
{
	devclass_t dc = dev->devclass;
	driverlink_t dl;

	TAILQ_FOREACH(dl, &dc->drivers, link) {
		/*
		 * If this driver's pass is too high, then ignore it.
		 * For most drivers in the default pass this will never
		 * be true.  A bus attaching in an early pass calls the
		 * identify routines here only for drivers eligible at
		 * that pass; drivers for later passes have their
		 * identify routines called on early-pass buses during
		 * BUS_NEW_PASS().
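		 *
		 * For example, a driver registered with
		 * EARLY_DRIVER_MODULE() at BUS_PASS_BUS (an editor's
		 * illustration) has its identify routine invoked from
		 * here only once bus_current_pass has reached
		 * BUS_PASS_BUS.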
		 */
		if (dl->pass > bus_current_pass)
			continue;
		DEVICE_IDENTIFY(dl->driver, dev);
	}

	return (0);
}

/**
 * @brief Helper function for implementing DEVICE_ATTACH()
 *
 * This function can be used to help implement the DEVICE_ATTACH() for
 * a bus.  It calls device_probe_and_attach() for each of the device's
 * children.
 */
int
bus_generic_attach(device_t dev)
{
	device_t child;

	TAILQ_FOREACH(child, &dev->children, link) {
		device_probe_and_attach(child);
	}

	return (0);
}

/**
 * @brief Helper function for delaying attaching children
 *
 * Many buses cannot run the bus transactions that children need in
 * order to probe and attach until interrupts and/or timers are running.
 * This function delays their attach until interrupts and timers are
 * enabled.
 */
int
bus_delayed_attach_children(device_t dev)
{
	/* Probe and attach the bus children when interrupts are available */
	config_intrhook_oneshot((ich_func_t)bus_generic_attach, dev);

	return (0);
}

/**
 * @brief Helper function for implementing DEVICE_DETACH()
 *
 * This function can be used to help implement the DEVICE_DETACH() for
 * a bus.  It calls device_detach() for each of the device's
 * children.
 */
int
bus_generic_detach(device_t dev)
{
	device_t child;
	int error;

	if (dev->state != DS_ATTACHED)
		return (EBUSY);

	/*
	 * Detach children in the reverse order.
	 * See bus_generic_suspend for details.
	 */
	TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) {
		if ((error = device_detach(child)) != 0)
			return (error);
	}

	return (0);
}

/**
 * @brief Helper function for implementing DEVICE_SHUTDOWN()
 *
 * This function can be used to help implement the DEVICE_SHUTDOWN()
 * for a bus.  It calls device_shutdown() for each of the device's
 * children.
 */
int
bus_generic_shutdown(device_t dev)
{
	device_t child;

	/*
	 * Shut down children in the reverse order.
	 * See bus_generic_suspend for details.
	 */
	TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) {
		device_shutdown(child);
	}

	return (0);
}

/**
 * @brief Default function for suspending a child device.
 *
 * This function is to be used by a bus's DEVICE_SUSPEND_CHILD().
 */
int
bus_generic_suspend_child(device_t dev, device_t child)
{
	int error;

	error = DEVICE_SUSPEND(child);

	if (error == 0)
		child->flags |= DF_SUSPENDED;

	return (error);
}

/**
 * @brief Default function for resuming a child device.
 *
 * This function is to be used by a bus's DEVICE_RESUME_CHILD().
 */
int
bus_generic_resume_child(device_t dev, device_t child)
{
	DEVICE_RESUME(child);
	child->flags &= ~DF_SUSPENDED;

	return (0);
}

/**
 * @brief Helper function for implementing DEVICE_SUSPEND()
 *
 * This function can be used to help implement the DEVICE_SUSPEND()
 * for a bus.  It calls DEVICE_SUSPEND() for each of the device's
 * children.  If any call to DEVICE_SUSPEND() fails, the suspend
 * operation is aborted and any devices which were suspended are
 * resumed immediately by calling their DEVICE_RESUME() methods.
 */
int
bus_generic_suspend(device_t dev)
{
	int error;
	device_t child;

	/*
	 * Suspend children in the reverse order.
	 * For most buses all children are equal, so the order does not matter.
	 * Other buses, such as acpi, carefully order their child devices to
	 * express implicit dependencies between them.  For such buses it is
	 * safer to bring down devices in the reverse order.
	 */
	TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) {
		error = BUS_SUSPEND_CHILD(dev, child);
		if (error != 0) {
			child = TAILQ_NEXT(child, link);
			if (child != NULL) {
				TAILQ_FOREACH_FROM(child, &dev->children, link)
					BUS_RESUME_CHILD(dev, child);
			}
			return (error);
		}
	}
	return (0);
}

/**
 * @brief Helper function for implementing DEVICE_RESUME()
 *
 * This function can be used to help implement the DEVICE_RESUME() for
 * a bus.  It calls DEVICE_RESUME() on each of the device's children.
 */
int
bus_generic_resume(device_t dev)
{
	device_t child;

	TAILQ_FOREACH(child, &dev->children, link) {
		BUS_RESUME_CHILD(dev, child);
		/* if resume fails, there's nothing we can usefully do... */
	}
	return (0);
}

/**
 * @brief Helper function for implementing BUS_RESET_POST
 *
 * Bus can use this function to implement common operations of
 * re-attaching or resuming the children after the bus itself was
 * reset, and after restoring bus-unique state of children.
 *
 * @param dev	The bus
 * @param flags	DEVF_RESET_*
 */
int
bus_helper_reset_post(device_t dev, int flags)
{
	device_t child;
	int error, error1;

	error = 0;
	TAILQ_FOREACH(child, &dev->children, link) {
		BUS_RESET_POST(dev, child);
		error1 = (flags & DEVF_RESET_DETACH) != 0 ?
		    device_probe_and_attach(child) :
		    BUS_RESUME_CHILD(dev, child);
		if (error == 0 && error1 != 0)
			error = error1;
	}
	return (error);
}

static void
bus_helper_reset_prepare_rollback(device_t dev, device_t child, int flags)
{
	child = TAILQ_NEXT(child, link);
	if (child == NULL)
		return;
	TAILQ_FOREACH_FROM(child, &dev->children, link) {
		BUS_RESET_POST(dev, child);
		if ((flags & DEVF_RESET_DETACH) != 0)
			device_probe_and_attach(child);
		else
			BUS_RESUME_CHILD(dev, child);
	}
}

/**
 * @brief Helper function for implementing BUS_RESET_PREPARE
 *
 * Bus can use this function to implement common operations of
 * detaching or suspending the children before the bus itself is
 * reset, and then save bus-unique state of children that must
 * persist around reset.
 *
 * @param dev	The bus
 * @param flags	DEVF_RESET_*
 */
int
bus_helper_reset_prepare(device_t dev, int flags)
{
	device_t child;
	int error;

	if (dev->state != DS_ATTACHED)
		return (EBUSY);

	TAILQ_FOREACH_REVERSE(child, &dev->children, device_list, link) {
		if ((flags & DEVF_RESET_DETACH) != 0) {
			error = device_get_state(child) == DS_ATTACHED ?
			    device_detach(child) : 0;
		} else {
			error = BUS_SUSPEND_CHILD(dev, child);
		}
		if (error == 0) {
			error = BUS_RESET_PREPARE(dev, child);
			if (error != 0) {
				if ((flags & DEVF_RESET_DETACH) != 0)
					device_probe_and_attach(child);
				else
					BUS_RESUME_CHILD(dev, child);
			}
		}
		if (error != 0) {
			bus_helper_reset_prepare_rollback(dev, child, flags);
			return (error);
		}
	}
	return (0);
}

/**
 * @brief Helper function for implementing BUS_PRINT_CHILD().
 *
 * This function prints the first part of the ascii representation of
 * @p child, including its name, unit and description (if any - see
 * device_set_desc()).
 *
 * @returns the number of characters printed
 */
int
bus_print_child_header(device_t dev, device_t child)
{
	int retval = 0;

	if (device_get_desc(child)) {
		retval += device_printf(child, "<%s>", device_get_desc(child));
	} else {
		retval += printf("%s", device_get_nameunit(child));
	}

	return (retval);
}

/**
 * @brief Helper function for implementing BUS_PRINT_CHILD().
 *
 * This function prints the last part of the ascii representation of
 * @p child, which consists of the string @c " on " followed by the
 * name and unit of the @p dev.
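 *
 * Together with bus_print_child_header(), this produces output of the
 * familiar form (an illustrative example; the device and bus names are
 * hypothetical):
 *
 *	foo0: <ACME Frobnicator> on bar0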
 *
 * @returns the number of characters printed
 */
int
bus_print_child_footer(device_t dev, device_t child)
{
	return (printf(" on %s\n", device_get_nameunit(dev)));
}

/**
 * @brief Helper function for implementing BUS_PRINT_CHILD().
 *
 * This function prints out the VM domain for the given device.
 *
 * @returns the number of characters printed
 */
int
bus_print_child_domain(device_t dev, device_t child)
{
	int domain;

	/* No domain? Don't print anything */
	if (BUS_GET_DOMAIN(dev, child, &domain) != 0)
		return (0);

	return (printf(" numa-domain %d", domain));
}

/**
 * @brief Helper function for implementing BUS_PRINT_CHILD().
 *
 * This function simply calls bus_print_child_header() followed by
 * bus_print_child_domain() and bus_print_child_footer().
 *
 * @returns the number of characters printed
 */
int
bus_generic_print_child(device_t dev, device_t child)
{
	int retval = 0;

	retval += bus_print_child_header(dev, child);
	retval += bus_print_child_domain(dev, child);
	retval += bus_print_child_footer(dev, child);

	return (retval);
}

/**
 * @brief Stub function for implementing BUS_READ_IVAR().
 *
 * @returns ENOENT
 */
int
bus_generic_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
	return (ENOENT);
}

/**
 * @brief Stub function for implementing BUS_WRITE_IVAR().
 *
 * @returns ENOENT
 */
int
bus_generic_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value)
{
	return (ENOENT);
}

/**
 * @brief Stub function for implementing BUS_GET_RESOURCE_LIST().
 *
 * @returns NULL
 */
struct resource_list *
bus_generic_get_resource_list(device_t dev, device_t child)
{
	return (NULL);
}

/**
 * @brief Helper function for implementing BUS_DRIVER_ADDED().
 *
 * This implementation of BUS_DRIVER_ADDED() simply calls the driver's
 * DEVICE_IDENTIFY() method to allow it to add new children to the bus
 * and then calls device_probe_and_attach() for each unattached child.
 */
void
bus_generic_driver_added(device_t dev, driver_t *driver)
{
	device_t child;

	DEVICE_IDENTIFY(driver, dev);
	TAILQ_FOREACH(child, &dev->children, link) {
		if (child->state == DS_NOTPRESENT ||
		    (child->flags & DF_REBID))
			device_probe_and_attach(child);
	}
}

/**
 * @brief Helper function for implementing BUS_NEW_PASS().
 *
 * This implementation of BUS_NEW_PASS() first calls the identify
 * routines for any drivers that probe at the current pass.  Then it
 * walks the list of devices for this bus.  If a device is already
 * attached, then it calls BUS_NEW_PASS() on that device.  If the
 * device is not already attached, it attempts to attach a driver to
 * it.
 */
void
bus_generic_new_pass(device_t dev)
{
	driverlink_t dl;
	devclass_t dc;
	device_t child;

	dc = dev->devclass;
	TAILQ_FOREACH(dl, &dc->drivers, link) {
		if (dl->pass == bus_current_pass)
			DEVICE_IDENTIFY(dl->driver, dev);
	}
	TAILQ_FOREACH(child, &dev->children, link) {
		if (child->state >= DS_ATTACHED)
			BUS_NEW_PASS(child);
		else if (child->state == DS_NOTPRESENT)
			device_probe_and_attach(child);
	}
}

/**
 * @brief Helper function for implementing BUS_SETUP_INTR().
 *
 * This simple implementation of BUS_SETUP_INTR() simply calls the
 * BUS_SETUP_INTR() method of the parent of @p dev.
 */
int
bus_generic_setup_intr(device_t dev, device_t child, struct resource *irq,
    int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg,
    void **cookiep)
{
	/* Propagate up the bus hierarchy until someone handles it. */
	if (dev->parent)
		return (BUS_SETUP_INTR(dev->parent, child, irq, flags,
		    filter, intr, arg, cookiep));
	return (EINVAL);
}

/**
 * @brief Helper function for implementing BUS_TEARDOWN_INTR().
* * This simple implementation of BUS_TEARDOWN_INTR() simply calls the * BUS_TEARDOWN_INTR() method of the parent of @p dev. */ int bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_TEARDOWN_INTR(dev->parent, child, irq, cookie)); return (EINVAL); } /** * @brief Helper function for implementing BUS_SUSPEND_INTR(). * * This simple implementation of BUS_SUSPEND_INTR() simply calls the * BUS_SUSPEND_INTR() method of the parent of @p dev. */ int bus_generic_suspend_intr(device_t dev, device_t child, struct resource *irq) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_SUSPEND_INTR(dev->parent, child, irq)); return (EINVAL); } /** * @brief Helper function for implementing BUS_RESUME_INTR(). * * This simple implementation of BUS_RESUME_INTR() simply calls the * BUS_RESUME_INTR() method of the parent of @p dev. */ int bus_generic_resume_intr(device_t dev, device_t child, struct resource *irq) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_RESUME_INTR(dev->parent, child, irq)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ADJUST_RESOURCE(). * * This simple implementation of BUS_ADJUST_RESOURCE() simply calls the * BUS_ADJUST_RESOURCE() method of the parent of @p dev. */ int bus_generic_adjust_resource(device_t dev, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ADJUST_RESOURCE(dev->parent, child, type, r, start, end)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This simple implementation of BUS_ALLOC_RESOURCE() simply calls the * BUS_ALLOC_RESOURCE() method of the parent of @p dev. */ struct resource * bus_generic_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ALLOC_RESOURCE(dev->parent, child, type, rid, start, end, count, flags)); return (NULL); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This simple implementation of BUS_RELEASE_RESOURCE() simply calls the * BUS_RELEASE_RESOURCE() method of the parent of @p dev. */ int bus_generic_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_RELEASE_RESOURCE(dev->parent, child, type, rid, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ACTIVATE_RESOURCE(). * * This simple implementation of BUS_ACTIVATE_RESOURCE() simply calls the * BUS_ACTIVATE_RESOURCE() method of the parent of @p dev. */ int bus_generic_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ACTIVATE_RESOURCE(dev->parent, child, type, rid, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DEACTIVATE_RESOURCE(). * * This simple implementation of BUS_DEACTIVATE_RESOURCE() simply calls the * BUS_DEACTIVATE_RESOURCE() method of the parent of @p dev. 
*/ int bus_generic_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_DEACTIVATE_RESOURCE(dev->parent, child, type, rid, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_MAP_RESOURCE(). * * This simple implementation of BUS_MAP_RESOURCE() simply calls the * BUS_MAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_MAP_RESOURCE(dev->parent, child, type, r, args, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_UNMAP_RESOURCE(). * * This simple implementation of BUS_UNMAP_RESOURCE() simply calls the * BUS_UNMAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_unmap_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_UNMAP_RESOURCE(dev->parent, child, type, r, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_BIND_INTR(). * * This simple implementation of BUS_BIND_INTR() simply calls the * BUS_BIND_INTR() method of the parent of @p dev. */ int bus_generic_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_BIND_INTR(dev->parent, child, irq, cpu)); return (EINVAL); } /** * @brief Helper function for implementing BUS_CONFIG_INTR(). * * This simple implementation of BUS_CONFIG_INTR() simply calls the * BUS_CONFIG_INTR() method of the parent of @p dev. */ int bus_generic_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_CONFIG_INTR(dev->parent, irq, trig, pol)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DESCRIBE_INTR(). * * This simple implementation of BUS_DESCRIBE_INTR() simply calls the * BUS_DESCRIBE_INTR() method of the parent of @p dev. */ int bus_generic_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_DESCRIBE_INTR(dev->parent, child, irq, cookie, descr)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_CPUS(). * * This simple implementation of BUS_GET_CPUS() simply calls the * BUS_GET_CPUS() method of the parent of @p dev. */ int bus_generic_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_CPUS(dev->parent, child, op, setsize, cpuset)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_DMA_TAG(). * * This simple implementation of BUS_GET_DMA_TAG() simply calls the * BUS_GET_DMA_TAG() method of the parent of @p dev. */ bus_dma_tag_t bus_generic_get_dma_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_DMA_TAG(dev->parent, child)); return (NULL); } /** * @brief Helper function for implementing BUS_GET_BUS_TAG(). 
* * This simple implementation of BUS_GET_BUS_TAG() simply calls the * BUS_GET_BUS_TAG() method of the parent of @p dev. */ bus_space_tag_t bus_generic_get_bus_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_BUS_TAG(dev->parent, child)); return ((bus_space_tag_t)0); } /** * @brief Helper function for implementing BUS_GET_RESOURCE(). * * This implementation of BUS_GET_RESOURCE() uses the * resource_list_find() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * search. */ int bus_generic_rl_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct resource_list * rl = NULL; struct resource_list_entry * rle = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); rle = resource_list_find(rl, type, rid); if (!rle) return (ENOENT); if (startp) *startp = rle->start; if (countp) *countp = rle->count; return (0); } /** * @brief Helper function for implementing BUS_SET_RESOURCE(). * * This implementation of BUS_SET_RESOURCE() uses the * resource_list_add() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ int bus_generic_rl_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); resource_list_add(rl, type, rid, start, (start + count - 1), count); return (0); } /** * @brief Helper function for implementing BUS_DELETE_RESOURCE(). * * This implementation of BUS_DELETE_RESOURCE() uses the * resource_list_delete() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ void bus_generic_rl_delete_resource(device_t dev, device_t child, int type, int rid) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return; resource_list_delete(rl, type, rid); return; } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This implementation of BUS_RELEASE_RESOURCE() uses the * resource_list_release() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. */ int bus_generic_rl_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); return (resource_list_release(rl, dev, child, type, rid, r)); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This implementation of BUS_ALLOC_RESOURCE() uses the * resource_list_alloc() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. */ struct resource * bus_generic_rl_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (NULL); return (resource_list_alloc(rl, dev, child, type, rid, start, end, count, flags)); } /** * @brief Helper function for implementing BUS_CHILD_PRESENT(). 
* * This simple implementation of BUS_CHILD_PRESENT() simply calls the * BUS_CHILD_PRESENT() method of the parent of @p dev. */ int bus_generic_child_present(device_t dev, device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(dev), dev)); } int bus_generic_get_domain(device_t dev, device_t child, int *domain) { if (dev->parent) return (BUS_GET_DOMAIN(dev->parent, dev, domain)); return (ENOENT); } /** * @brief Helper function for implementing BUS_RESCAN(). * * This null implementation of BUS_RESCAN() always fails to indicate * the bus does not support rescanning. */ int bus_null_rescan(device_t dev) { return (ENXIO); } /* * Some convenience functions to make it easier for drivers to use the * resource-management functions. All these really do is hide the * indirection through the parent's method table, making for slightly * less-wordy code. In the future, it might make sense for this code * to maintain some sort of a list of resources allocated by each device. */ int bus_alloc_resources(device_t dev, struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) res[i] = NULL; for (i = 0; rs[i].type != -1; i++) { res[i] = bus_alloc_resource_any(dev, rs[i].type, &rs[i].rid, rs[i].flags); if (res[i] == NULL && !(rs[i].flags & RF_OPTIONAL)) { bus_release_resources(dev, rs, res); return (ENXIO); } } return (0); } void bus_release_resources(device_t dev, const struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) if (res[i] != NULL) { bus_release_resource( dev, rs[i].type, rs[i].rid, res[i]); res[i] = NULL; } } /** * @brief Wrapper function for BUS_ALLOC_RESOURCE(). * * This function simply calls the BUS_ALLOC_RESOURCE() method of the * parent of @p dev. */ struct resource * bus_alloc_resource(device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (dev->parent == NULL) return (NULL); res = BUS_ALLOC_RESOURCE(dev->parent, dev, type, rid, start, end, count, flags); return (res); } /** * @brief Wrapper function for BUS_ADJUST_RESOURCE(). * * This function simply calls the BUS_ADJUST_RESOURCE() method of the * parent of @p dev. */ int bus_adjust_resource(device_t dev, int type, struct resource *r, rman_res_t start, rman_res_t end) { if (dev->parent == NULL) return (EINVAL); return (BUS_ADJUST_RESOURCE(dev->parent, dev, type, r, start, end)); } /** * @brief Wrapper function for BUS_ACTIVATE_RESOURCE(). * * This function simply calls the BUS_ACTIVATE_RESOURCE() method of the * parent of @p dev. */ int bus_activate_resource(device_t dev, int type, int rid, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_ACTIVATE_RESOURCE(dev->parent, dev, type, rid, r)); } /** * @brief Wrapper function for BUS_DEACTIVATE_RESOURCE(). * * This function simply calls the BUS_DEACTIVATE_RESOURCE() method of the * parent of @p dev. */ int bus_deactivate_resource(device_t dev, int type, int rid, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_DEACTIVATE_RESOURCE(dev->parent, dev, type, rid, r)); } /** * @brief Wrapper function for BUS_MAP_RESOURCE(). * * This function simply calls the BUS_MAP_RESOURCE() method of the * parent of @p dev. 
*/ int bus_map_resource(device_t dev, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_MAP_RESOURCE(dev->parent, dev, type, r, args, map)); } /** * @brief Wrapper function for BUS_UNMAP_RESOURCE(). * * This function simply calls the BUS_UNMAP_RESOURCE() method of the * parent of @p dev. */ int bus_unmap_resource(device_t dev, int type, struct resource *r, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_UNMAP_RESOURCE(dev->parent, dev, type, r, map)); } /** * @brief Wrapper function for BUS_RELEASE_RESOURCE(). * * This function simply calls the BUS_RELEASE_RESOURCE() method of the * parent of @p dev. */ int bus_release_resource(device_t dev, int type, int rid, struct resource *r) { int rv; if (dev->parent == NULL) return (EINVAL); rv = BUS_RELEASE_RESOURCE(dev->parent, dev, type, rid, r); return (rv); } /** * @brief Wrapper function for BUS_SETUP_INTR(). * * This function simply calls the BUS_SETUP_INTR() method of the * parent of @p dev. */ int bus_setup_intr(device_t dev, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { int error; if (dev->parent == NULL) return (EINVAL); error = BUS_SETUP_INTR(dev->parent, dev, r, flags, filter, handler, arg, cookiep); if (error != 0) return (error); if (handler != NULL && !(flags & INTR_MPSAFE)) device_printf(dev, "[GIANT-LOCKED]\n"); return (0); } /** * @brief Wrapper function for BUS_TEARDOWN_INTR(). * * This function simply calls the BUS_TEARDOWN_INTR() method of the * parent of @p dev. */ int bus_teardown_intr(device_t dev, struct resource *r, void *cookie) { if (dev->parent == NULL) return (EINVAL); return (BUS_TEARDOWN_INTR(dev->parent, dev, r, cookie)); } /** * @brief Wrapper function for BUS_SUSPEND_INTR(). * * This function simply calls the BUS_SUSPEND_INTR() method of the * parent of @p dev. */ int bus_suspend_intr(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_SUSPEND_INTR(dev->parent, dev, r)); } /** * @brief Wrapper function for BUS_RESUME_INTR(). * * This function simply calls the BUS_RESUME_INTR() method of the * parent of @p dev. */ int bus_resume_intr(device_t dev, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_RESUME_INTR(dev->parent, dev, r)); } /** * @brief Wrapper function for BUS_BIND_INTR(). * * This function simply calls the BUS_BIND_INTR() method of the * parent of @p dev. */ int bus_bind_intr(device_t dev, struct resource *r, int cpu) { if (dev->parent == NULL) return (EINVAL); return (BUS_BIND_INTR(dev->parent, dev, r, cpu)); } /** * @brief Wrapper function for BUS_DESCRIBE_INTR(). * * This function first formats the requested description into a * temporary buffer and then calls the BUS_DESCRIBE_INTR() method of * the parent of @p dev. */ int bus_describe_intr(device_t dev, struct resource *irq, void *cookie, const char *fmt, ...) { va_list ap; char descr[MAXCOMLEN + 1]; if (dev->parent == NULL) return (EINVAL); va_start(ap, fmt); vsnprintf(descr, sizeof(descr), fmt, ap); va_end(ap); return (BUS_DESCRIBE_INTR(dev->parent, dev, irq, cookie, descr)); } /** * @brief Wrapper function for BUS_SET_RESOURCE(). * * This function simply calls the BUS_SET_RESOURCE() method of the * parent of @p dev. 
*/ int bus_set_resource(device_t dev, int type, int rid, rman_res_t start, rman_res_t count) { return (BUS_SET_RESOURCE(device_get_parent(dev), dev, type, rid, start, count)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev. */ int bus_get_resource(device_t dev, int type, int rid, rman_res_t *startp, rman_res_t *countp) { return (BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, startp, countp)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the start value. */ rman_res_t bus_get_resource_start(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (start); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the count value. */ rman_res_t bus_get_resource_count(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (count); } /** * @brief Wrapper function for BUS_DELETE_RESOURCE(). * * This function simply calls the BUS_DELETE_RESOURCE() method of the * parent of @p dev. */ void bus_delete_resource(device_t dev, int type, int rid) { BUS_DELETE_RESOURCE(device_get_parent(dev), dev, type, rid); } /** * @brief Wrapper function for BUS_CHILD_PRESENT(). * * This function simply calls the BUS_CHILD_PRESENT() method of the * parent of @p dev. */ int bus_child_present(device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(child), child)); } /** - * @brief Wrapper function for BUS_CHILD_PNPINFO_STR(). + * @brief Wrapper function for BUS_CHILD_PNPINFO(). * - * This function simply calls the BUS_CHILD_PNPINFO_STR() method of the - * parent of @p dev. + * This function simply calls the BUS_CHILD_PNPINFO() method of the parent of @p + * dev. */ int -bus_child_pnpinfo_str(device_t child, char *buf, size_t buflen) +bus_child_pnpinfo(device_t child, struct sbuf *sb) { device_t parent; parent = device_get_parent(child); - if (parent == NULL) { - *buf = '\0'; + if (parent == NULL) return (0); - } - return (BUS_CHILD_PNPINFO_STR(parent, child, buf, buflen)); + return (BUS_CHILD_PNPINFO(parent, child, sb)); } /** - * @brief Wrapper function for BUS_CHILD_LOCATION_STR(). + * @brief Generic implementation that does nothing for bus_child_pnpinfo * - * This function simply calls the BUS_CHILD_LOCATION_STR() method of the - * parent of @p dev. + * This function has the right signature and returns 0 since the sbuf is passed + * to us to append to. */ int -bus_child_location_str(device_t child, char *buf, size_t buflen) +bus_generic_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb) { - device_t parent; - - parent = device_get_parent(child); - if (parent == NULL) { - *buf = '\0'; - return (0); - } - return (BUS_CHILD_LOCATION_STR(parent, child, buf, buflen)); + return (0); } /** - * @brief Wrapper function for bus_child_pnpinfo_str using sbuf + * @brief Wrapper function for BUS_CHILD_LOCATION(). * - * A convenient wrapper frunction for bus_child_pnpinfo_str that allows - * us to splat that into an sbuf. It uses unholy knowledge of sbuf to - * accomplish this, however. 
It is an interim function until we can convert - * this interface more fully. + * This function simply calls the BUS_CHILD_LOCATION() method of the parent of + * @p dev. */ -/* Note: we reach inside of sbuf because it's API isn't rich enough to do this */ -#define SPACE(s) ((s)->s_size - (s)->s_len) -#define EOB(s) ((s)->s_buf + (s)->s_len) - -static int -bus_child_pnpinfo_sb(device_t dev, struct sbuf *sb) +int +bus_child_location(device_t child, struct sbuf *sb) { - char *p; - ssize_t space; + device_t parent; - MPASS((sb->s_flags & SBUF_INCLUDENUL) == 0); - MPASS(sb->s_size >= sb->s_len); - if (sb->s_error != 0) - return (-1); - space = SPACE(sb); - if (space <= 1) { - sb->s_error = ENOMEM; - return (-1); - } - p = EOB(sb); - *p = '\0'; /* sbuf buffer isn't NUL terminated until sbuf_finish() */ - bus_child_pnpinfo_str(dev, p, space); - sb->s_len += strlen(p); - return (0); + parent = device_get_parent(child); + if (parent == NULL) + return (0); + return (BUS_CHILD_LOCATION(parent, child, sb)); } /** - * @brief Wrapper function for bus_child_pnpinfo_str using sbuf + * @brief Generic implementation that does nothing for bus_child_location * - * A convenient wrapper frunction for bus_child_pnpinfo_str that allows - * us to splat that into an sbuf. It uses unholy knowledge of sbuf to - * accomplish this, however. It is an interim function until we can convert - * this interface more fully. + * This function has the right signature and returns 0 since the sbuf is passed + * to us to append to. */ -static int -bus_child_location_sb(device_t dev, struct sbuf *sb) +int +bus_generic_child_location(device_t dev, device_t child, struct sbuf *sb) { - char *p; - ssize_t space; - - MPASS((sb->s_flags & SBUF_INCLUDENUL) == 0); - MPASS(sb->s_size >= sb->s_len); - if (sb->s_error != 0) - return (-1); - space = SPACE(sb); - if (space <= 1) { - sb->s_error = ENOMEM; - return (-1); - } - p = EOB(sb); - *p = '\0'; /* sbuf buffer isn't NUL terminated until sbuf_finish() */ - bus_child_location_str(dev, p, space); - sb->s_len += strlen(p); return (0); } -#undef SPACE -#undef EOB /** * @brief Wrapper function for BUS_GET_CPUS(). * * This function simply calls the BUS_GET_CPUS() method of the * parent of @p dev. */ int bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (EINVAL); return (BUS_GET_CPUS(parent, dev, op, setsize, cpuset)); } /** * @brief Wrapper function for BUS_GET_DMA_TAG(). * * This function simply calls the BUS_GET_DMA_TAG() method of the * parent of @p dev. */ bus_dma_tag_t bus_get_dma_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (NULL); return (BUS_GET_DMA_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_BUS_TAG(). * * This function simply calls the BUS_GET_BUS_TAG() method of the * parent of @p dev. */ bus_space_tag_t bus_get_bus_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return ((bus_space_tag_t)0); return (BUS_GET_BUS_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_DOMAIN(). * * This function simply calls the BUS_GET_DOMAIN() method of the * parent of @p dev. */ int bus_get_domain(device_t dev, int *domain) { return (BUS_GET_DOMAIN(device_get_parent(dev), dev, domain)); } /* Resume all devices and then notify userland that we're up again. 
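 * Userland observes these notifications via devd(8) as events of the
 * form "!system=kern subsystem=power type=resume" (editor's note).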
*/ static int root_resume(device_t dev) { int error; error = bus_generic_resume(dev); if (error == 0) { devctl_notify("kern", "power", "resume", NULL); /* Deprecated gone in 14 */ devctl_notify("kernel", "power", "resume", NULL); } return (error); } static int root_print_child(device_t dev, device_t child) { int retval = 0; retval += bus_print_child_header(dev, child); retval += printf("\n"); return (retval); } static int root_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { /* * If an interrupt mapping gets to here something bad has happened. */ panic("root_setup_intr"); } /* * If we get here, assume that the device is permanent and really is * present in the system. Removable bus drivers are expected to intercept * this call long before it gets here. We return -1 so that drivers that * really care can check vs -1 or some ERRNO returned higher in the food * chain. */ static int root_child_present(device_t dev, device_t child) { return (-1); } static int root_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { switch (op) { case INTR_CPUS: /* Default to returning the set of all CPUs. */ if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = all_cpus; return (0); default: return (EINVAL); } } static kobj_method_t root_methods[] = { /* Device interface */ KOBJMETHOD(device_shutdown, bus_generic_shutdown), KOBJMETHOD(device_suspend, bus_generic_suspend), KOBJMETHOD(device_resume, root_resume), /* Bus interface */ KOBJMETHOD(bus_print_child, root_print_child), KOBJMETHOD(bus_read_ivar, bus_generic_read_ivar), KOBJMETHOD(bus_write_ivar, bus_generic_write_ivar), KOBJMETHOD(bus_setup_intr, root_setup_intr), KOBJMETHOD(bus_child_present, root_child_present), KOBJMETHOD(bus_get_cpus, root_get_cpus), KOBJMETHOD_END }; static driver_t root_driver = { "root", root_methods, 1, /* no softc */ }; device_t root_bus; devclass_t root_devclass; static int root_bus_module_handler(module_t mod, int what, void* arg) { switch (what) { case MOD_LOAD: TAILQ_INIT(&bus_data_devices); kobj_class_compile((kobj_class_t) &root_driver); root_bus = make_device(NULL, "root", 0); root_bus->desc = "System root bus"; kobj_init((kobj_t) root_bus, (kobj_class_t) &root_driver); root_bus->driver = &root_driver; root_bus->state = DS_ATTACHED; root_devclass = devclass_find_internal("root", NULL, FALSE); devinit(); return (0); case MOD_SHUTDOWN: device_shutdown(root_bus); return (0); default: return (EOPNOTSUPP); } return (0); } static moduledata_t root_bus_mod = { "rootbus", root_bus_module_handler, NULL }; DECLARE_MODULE(rootbus, root_bus_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); /** * @brief Automatically configure devices * * This function begins the autoconfiguration process by calling * device_probe_and_attach() for each child of the @c root0 device. */ void root_bus_configure(void) { PDEBUG((".")); /* Eventually this will be split up, but this is sufficient for now. */ bus_set_pass(BUS_PASS_DEFAULT); } /** * @brief Module handler for registering device drivers * * This module handler is used to automatically register device * drivers when modules are loaded. 
If @p what is MOD_LOAD, it calls
 * devclass_add_driver() for the driver described by the
 * driver_module_data structure pointed to by @p arg.
 */
int
driver_module_handler(module_t mod, int what, void *arg)
{
	struct driver_module_data *dmd;
	devclass_t bus_devclass;
	kobj_class_t driver;
	int error, pass;

	dmd = (struct driver_module_data *)arg;
	bus_devclass = devclass_find_internal(dmd->dmd_busname, NULL, TRUE);
	error = 0;

	switch (what) {
	case MOD_LOAD:
		if (dmd->dmd_chainevh)
			error = dmd->dmd_chainevh(mod, what, dmd->dmd_chainarg);

		pass = dmd->dmd_pass;
		driver = dmd->dmd_driver;
		PDEBUG(("Loading module: driver %s on bus %s (pass %d)",
		    DRIVERNAME(driver), dmd->dmd_busname, pass));
		error = devclass_add_driver(bus_devclass, driver, pass,
		    dmd->dmd_devclass);
		break;

	case MOD_UNLOAD:
		PDEBUG(("Unloading module: driver %s from bus %s",
		    DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname));
		error = devclass_delete_driver(bus_devclass, dmd->dmd_driver);

		if (!error && dmd->dmd_chainevh)
			error = dmd->dmd_chainevh(mod, what, dmd->dmd_chainarg);
		break;
	case MOD_QUIESCE:
		PDEBUG(("Quiesce module: driver %s from bus %s",
		    DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname));
		error = devclass_quiesce_driver(bus_devclass, dmd->dmd_driver);

		if (!error && dmd->dmd_chainevh)
			error = dmd->dmd_chainevh(mod, what, dmd->dmd_chainarg);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

/**
 * @brief Enumerate all hinted devices for this bus.
 *
 * Walks through the hints for this bus and calls the bus_hinted_child
 * routine for each one it finds.  It searches first for the specific
 * bus that's being probed for hinted children (e.g. isa0), and then
 * for generic children (e.g. isa).
 *
 * @param bus	bus device to enumerate
 */
void
bus_enumerate_hinted_children(device_t bus)
{
	int i;
	const char *dname, *busname;
	int dunit;

	/*
	 * enumerate all devices on the specific bus
	 */
	busname = device_get_nameunit(bus);
	i = 0;
	while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0)
		BUS_HINTED_CHILD(bus, dname, dunit);

	/*
	 * and all the generic ones.
	 */
	busname = device_get_name(bus);
	i = 0;
	while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0)
		BUS_HINTED_CHILD(bus, dname, dunit);
}

#ifdef BUS_DEBUG

/* the _short versions avoid iteration by not calling anything that prints
 * more than oneliners. I love oneliners.
 */

static void
print_device_short(device_t dev, int indent)
{
	if (!dev)
		return;

	indentprintf(("device %d: <%s> %sparent,%schildren,%s%s%s%s%s%s,%sivars,%ssoftc,busy=%d\n",
	    dev->unit, dev->desc,
	    (dev->parent? "":"no "),
	    (TAILQ_EMPTY(&dev->children)? "no ":""),
	    (dev->flags&DF_ENABLED? "enabled,":"disabled,"),
	    (dev->flags&DF_FIXEDCLASS? "fixed,":""),
	    (dev->flags&DF_WILDCARD? "wildcard,":""),
	    (dev->flags&DF_DESCMALLOCED? "descmalloced,":""),
	    (dev->flags&DF_REBID? "rebiddable,":""),
	    (dev->flags&DF_SUSPENDED? "suspended,":""),
	    (dev->ivars? "":"no "),
	    (dev->softc?
"":"no "), dev->busy)); } static void print_device(device_t dev, int indent) { if (!dev) return; print_device_short(dev, indent); indentprintf(("Parent:\n")); print_device_short(dev->parent, indent+1); indentprintf(("Driver:\n")); print_driver_short(dev->driver, indent+1); indentprintf(("Devclass:\n")); print_devclass_short(dev->devclass, indent+1); } void print_device_tree_short(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device_short(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree_short(child, indent+1); } } void print_device_tree(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree(child, indent+1); } } static void print_driver_short(driver_t *driver, int indent) { if (!driver) return; indentprintf(("driver %s: softc size = %zd\n", driver->name, driver->size)); } static void print_driver(driver_t *driver, int indent) { if (!driver) return; print_driver_short(driver, indent); } static void print_driver_list(driver_list_t drivers, int indent) { driverlink_t driver; TAILQ_FOREACH(driver, &drivers, link) { print_driver(driver->driver, indent); } } static void print_devclass_short(devclass_t dc, int indent) { if ( !dc ) return; indentprintf(("devclass %s: max units = %d\n", dc->name, dc->maxunit)); } static void print_devclass(devclass_t dc, int indent) { int i; if ( !dc ) return; print_devclass_short(dc, indent); indentprintf(("Drivers:\n")); print_driver_list(dc->drivers, indent+1); indentprintf(("Devices:\n")); for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) print_device(dc->devices[i], indent+1); } void print_devclass_list_short(void) { devclass_t dc; printf("Short listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass_short(dc, 0); } } void print_devclass_list(void) { devclass_t dc; printf("Full listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass(dc, 0); } } #endif /* * User-space access to the device tree. * * We implement a small set of nodes: * * hw.bus Single integer read method to obtain the * current generation count. * hw.bus.devices Reads the entire device tree in flat space. * hw.bus.rman Resource manager interface * * We might like to add the ability to scan devclasses and/or drivers to * determine what else is currently loaded/available. */ static int sysctl_bus_info(SYSCTL_HANDLER_ARGS) { struct u_businfo ubus; ubus.ub_version = BUS_USER_VERSION; ubus.ub_generation = bus_data_generation; return (SYSCTL_OUT(req, &ubus, sizeof(ubus))); } SYSCTL_PROC(_hw_bus, OID_AUTO, info, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_bus_info, "S,u_businfo", "bus-related data"); static int sysctl_devices(SYSCTL_HANDLER_ARGS) { struct sbuf sb; int *name = (int *)arg1; u_int namelen = arg2; int index; device_t dev; struct u_device *udev; int error; if (namelen != 2) return (EINVAL); if (bus_data_generation_check(name[0])) return (EINVAL); index = name[1]; /* * Scan the list of devices, looking for the requested index. */ TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (index-- == 0) break; } if (dev == NULL) return (ENOENT); /* * Populate the return item, careful not to overflow the buffer. 
*/ udev = malloc(sizeof(*udev), M_BUS, M_WAITOK | M_ZERO); if (udev == NULL) return (ENOMEM); udev->dv_handle = (uintptr_t)dev; udev->dv_parent = (uintptr_t)dev->parent; udev->dv_devflags = dev->devflags; udev->dv_flags = dev->flags; udev->dv_state = dev->state; sbuf_new(&sb, udev->dv_fields, sizeof(udev->dv_fields), SBUF_FIXEDLEN); if (dev->nameunit != NULL) sbuf_cat(&sb, dev->nameunit); sbuf_putc(&sb, '\0'); if (dev->desc != NULL) sbuf_cat(&sb, dev->desc); sbuf_putc(&sb, '\0'); if (dev->driver != NULL) sbuf_cat(&sb, dev->driver->name); sbuf_putc(&sb, '\0'); - bus_child_pnpinfo_sb(dev, &sb); + bus_child_pnpinfo(dev, &sb); sbuf_putc(&sb, '\0'); - bus_child_location_sb(dev, &sb); + bus_child_location(dev, &sb); sbuf_putc(&sb, '\0'); error = sbuf_finish(&sb); if (error == 0) error = SYSCTL_OUT(req, udev, sizeof(*udev)); sbuf_delete(&sb); free(udev, M_BUS); return (error); } SYSCTL_NODE(_hw_bus, OID_AUTO, devices, CTLFLAG_RD | CTLFLAG_NEEDGIANT, sysctl_devices, "system device tree"); int bus_data_generation_check(int generation) { if (generation != bus_data_generation) return (1); /* XXX generate optimised lists here? */ return (0); } void bus_data_generation_update(void) { atomic_add_int(&bus_data_generation, 1); } int bus_free_resource(device_t dev, int type, struct resource *r) { if (r == NULL) return (0); return (bus_release_resource(dev, type, rman_get_rid(r), r)); } device_t device_lookup_by_name(const char *name) { device_t dev; TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (dev->nameunit != NULL && strcmp(dev->nameunit, name) == 0) return (dev); } return (NULL); } /* * /dev/devctl2 implementation. The existing /dev/devctl device has * implicit semantics on open, so it could not be reused for this. * Another option would be to call this /dev/bus? */ static int find_device(struct devreq *req, device_t *devp) { device_t dev; /* * First, ensure that the name is nul terminated. */ if (memchr(req->dr_name, '\0', sizeof(req->dr_name)) == NULL) return (EINVAL); /* * Second, try to find an attached device whose name matches * 'name'. */ dev = device_lookup_by_name(req->dr_name); if (dev != NULL) { *devp = dev; return (0); } /* Finally, give device enumerators a chance. */ dev = NULL; EVENTHANDLER_DIRECT_INVOKE(dev_lookup, req->dr_name, &dev); if (dev == NULL) return (ENOENT); *devp = dev; return (0); } static bool driver_exists(device_t bus, const char *driver) { devclass_t dc; for (dc = bus->devclass; dc != NULL; dc = dc->parent) { if (devclass_find_driver_internal(dc, driver) != NULL) return (true); } return (false); } static void device_gen_nomatch(device_t dev) { device_t child; if (dev->flags & DF_NEEDNOMATCH && dev->state == DS_NOTPRESENT) { BUS_PROBE_NOMATCH(dev->parent, dev); devnomatch(dev); dev->flags |= DF_DONENOMATCH; } dev->flags &= ~DF_NEEDNOMATCH; TAILQ_FOREACH(child, &dev->children, link) { device_gen_nomatch(child); } } static void device_do_deferred_actions(void) { devclass_t dc; driverlink_t dl; /* * Walk through the devclasses to find all the drivers we've tagged as * deferred during the freeze and call the driver added routines. They * have already been added to the lists in the background, so the driver * added routines that trigger a probe will have all the right bidders * for the probe auction. */ TAILQ_FOREACH(dc, &devclasses, link) { TAILQ_FOREACH(dl, &dc->drivers, link) { if (dl->flags & DL_DEFERRED_PROBE) { devclass_driver_added(dc, dl->driver); dl->flags &= ~DL_DEFERRED_PROBE; } } } /* * We also defer no-match events during a freeze. 
Walk the tree and * generate all the pent-up events that are still relevant. */ device_gen_nomatch(root_bus); bus_data_generation_update(); } static int devctl2_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct devreq *req; device_t dev; int error, old; /* Locate the device to control. */ mtx_lock(&Giant); req = (struct devreq *)data; switch (cmd) { case DEV_ATTACH: case DEV_DETACH: case DEV_ENABLE: case DEV_DISABLE: case DEV_SUSPEND: case DEV_RESUME: case DEV_SET_DRIVER: case DEV_CLEAR_DRIVER: case DEV_RESCAN: case DEV_DELETE: case DEV_RESET: error = priv_check(td, PRIV_DRIVER); if (error == 0) error = find_device(req, &dev); break; case DEV_FREEZE: case DEV_THAW: error = priv_check(td, PRIV_DRIVER); break; default: error = ENOTTY; break; } if (error) { mtx_unlock(&Giant); return (error); } /* Perform the requested operation. */ switch (cmd) { case DEV_ATTACH: if (device_is_attached(dev) && (dev->flags & DF_REBID) == 0) error = EBUSY; else if (!device_is_enabled(dev)) error = ENXIO; else error = device_probe_and_attach(dev); break; case DEV_DETACH: if (!device_is_attached(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } error = device_detach(dev); break; case DEV_ENABLE: if (device_is_enabled(dev)) { error = EBUSY; break; } /* * If the device has been probed but not attached (e.g. * when it has been disabled by a loader hint), just * attach the device rather than doing a full probe. */ device_enable(dev); if (device_is_alive(dev)) { /* * If the device was disabled via a hint, clear * the hint. */ if (resource_disabled(dev->driver->name, dev->unit)) resource_unset_value(dev->driver->name, dev->unit, "disabled"); error = device_attach(dev); } else error = device_probe_and_attach(dev); break; case DEV_DISABLE: if (!device_is_enabled(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } /* * Force DF_FIXEDCLASS on around detach to preserve * the existing name. */ old = dev->flags; dev->flags |= DF_FIXEDCLASS; error = device_detach(dev); if (!(old & DF_FIXEDCLASS)) dev->flags &= ~DF_FIXEDCLASS; if (error == 0) device_disable(dev); break; case DEV_SUSPEND: if (device_is_suspended(dev)) { error = EBUSY; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_SUSPEND_CHILD(device_get_parent(dev), dev); break; case DEV_RESUME: if (!device_is_suspended(dev)) { error = EINVAL; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_RESUME_CHILD(device_get_parent(dev), dev); break; case DEV_SET_DRIVER: { devclass_t dc; char driver[128]; error = copyinstr(req->dr_data, driver, sizeof(driver), NULL); if (error) break; if (driver[0] == '\0') { error = EINVAL; break; } if (dev->devclass != NULL && strcmp(driver, dev->devclass->name) == 0) /* XXX: Could possibly force DF_FIXEDCLASS on? */ break; /* * Scan drivers for this device's bus looking for at * least one matching driver. */ if (dev->parent == NULL) { error = EINVAL; break; } if (!driver_exists(dev->parent, driver)) { error = ENOENT; break; } dc = devclass_create(driver); if (dc == NULL) { error = ENOMEM; break; } /* Detach device if necessary. */ if (device_is_attached(dev)) { if (req->dr_flags & DEVF_SET_DRIVER_DETACH) error = device_detach(dev); else error = EBUSY; if (error) break; } /* Clear any previously-fixed device class and unit. 
*/ if (dev->flags & DF_FIXEDCLASS) devclass_delete_device(dev->devclass, dev); dev->flags |= DF_WILDCARD; dev->unit = -1; /* Force the new device class. */ error = devclass_add_device(dc, dev); if (error) break; dev->flags |= DF_FIXEDCLASS; error = device_probe_and_attach(dev); break; } case DEV_CLEAR_DRIVER: if (!(dev->flags & DF_FIXEDCLASS)) { error = 0; break; } if (device_is_attached(dev)) { if (req->dr_flags & DEVF_CLEAR_DRIVER_DETACH) error = device_detach(dev); else error = EBUSY; if (error) break; } dev->flags &= ~DF_FIXEDCLASS; dev->flags |= DF_WILDCARD; devclass_delete_device(dev->devclass, dev); error = device_probe_and_attach(dev); break; case DEV_RESCAN: if (!device_is_attached(dev)) { error = ENXIO; break; } error = BUS_RESCAN(dev); break; case DEV_DELETE: { device_t parent; parent = device_get_parent(dev); if (parent == NULL) { error = EINVAL; break; } if (!(req->dr_flags & DEVF_FORCE_DELETE)) { if (bus_child_present(dev) != 0) { error = EBUSY; break; } } error = device_delete_child(parent, dev); break; } case DEV_FREEZE: if (device_frozen) error = EBUSY; else device_frozen = true; break; case DEV_THAW: if (!device_frozen) error = EBUSY; else { device_do_deferred_actions(); device_frozen = false; } break; case DEV_RESET: if ((req->dr_flags & ~(DEVF_RESET_DETACH)) != 0) { error = EINVAL; break; } error = BUS_RESET_CHILD(device_get_parent(dev), dev, req->dr_flags); break; } mtx_unlock(&Giant); return (error); }
static struct cdevsw devctl2_cdevsw = { .d_version = D_VERSION, .d_ioctl = devctl2_ioctl, .d_name = "devctl2", };
static void devctl2_init(void) { make_dev_credf(MAKEDEV_ETERNAL, &devctl2_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600, "devctl2"); }
/* * APIs to manage deprecation and obsolescence. */ static int obsolete_panic = 0; SYSCTL_INT(_debug, OID_AUTO, obsolete_panic, CTLFLAG_RWTUN, &obsolete_panic, 0, "Panic when obsolete features are used (0 = never, 1 = if obsolete, " "2 = if deprecated)");
static void gone_panic(int major, int running, const char *msg) { switch (obsolete_panic) { case 0: return; case 1: if (running < major) return; /* FALLTHROUGH */ default: panic("%s", msg); } }
void _gone_in(int major, const char *msg) { gone_panic(major, P_OSREL_MAJOR(__FreeBSD_version), msg); if (P_OSREL_MAJOR(__FreeBSD_version) >= major) printf("Obsolete code will be removed soon: %s\n", msg); else printf("Deprecated code (to be removed in FreeBSD %d): %s\n", major, msg); }
void _gone_in_dev(device_t dev, int major, const char *msg) { gone_panic(major, P_OSREL_MAJOR(__FreeBSD_version), msg); if (P_OSREL_MAJOR(__FreeBSD_version) >= major) device_printf(dev, "Obsolete code will be removed soon: %s\n", msg); else device_printf(dev, "Deprecated code (to be removed in FreeBSD %d): %s\n", major, msg); }
#ifdef DDB DB_SHOW_COMMAND(device, db_show_device) { device_t dev; if (!have_addr) return; dev = (device_t)addr; db_printf("name: %s\n", device_get_nameunit(dev)); db_printf(" driver: %s\n", DRIVERNAME(dev->driver)); db_printf(" class: %s\n", DEVCLANAME(dev->devclass)); db_printf(" addr: %p\n", dev); db_printf(" parent: %p\n", dev->parent); db_printf(" softc: %p\n", dev->softc); db_printf(" ivars: %p\n", dev->ivars); } DB_SHOW_ALL_COMMAND(devices, db_show_all_devices) { device_t dev; TAILQ_FOREACH(dev, &bus_data_devices, devlink) { db_show_device((db_expr_t)dev, true, count, modif); } } #endif
diff --git a/sys/powerpc/ofw/ofw_pcibus.c b/sys/powerpc/ofw/ofw_pcibus.c index 47cb4665b832..21ce8b04e88a 100644 --- a/sys/powerpc/ofw/ofw_pcibus.c +++ b/sys/powerpc/ofw/ofw_pcibus.c @@
-1,430 +1,429 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * Copyright (c) 2003, Thomas Moestl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ofw_pcibus.h" #include "pcib_if.h" #include "pci_if.h" typedef uint32_t ofw_pci_intr_t; /* Methods */ static device_probe_t ofw_pcibus_probe; static device_attach_t ofw_pcibus_attach; static pci_alloc_devinfo_t ofw_pcibus_alloc_devinfo; static pci_assign_interrupt_t ofw_pcibus_assign_interrupt; static ofw_bus_get_devinfo_t ofw_pcibus_get_devinfo; static bus_child_deleted_t ofw_pcibus_child_deleted; -static int ofw_pcibus_child_pnpinfo_str_method(device_t cbdev, device_t child, - char *buf, size_t buflen); +static int ofw_pcibus_child_pnpinfo_method(device_t cbdev, device_t child, + struct sbuf *sb); static void ofw_pcibus_enum_devtree(device_t dev, u_int domain, u_int busno); static void ofw_pcibus_enum_bus(device_t dev, u_int domain, u_int busno); static device_method_t ofw_pcibus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ofw_pcibus_probe), DEVMETHOD(device_attach, ofw_pcibus_attach), /* Bus interface */ DEVMETHOD(bus_child_deleted, ofw_pcibus_child_deleted), - DEVMETHOD(bus_child_pnpinfo_str, ofw_pcibus_child_pnpinfo_str_method), + DEVMETHOD(bus_child_pnpinfo, ofw_pcibus_child_pnpinfo_method), DEVMETHOD(bus_rescan, bus_null_rescan), DEVMETHOD(bus_get_cpus, ofw_pcibus_get_cpus), DEVMETHOD(bus_get_domain, ofw_pcibus_get_domain), /* PCI interface */ DEVMETHOD(pci_alloc_devinfo, ofw_pcibus_alloc_devinfo), DEVMETHOD(pci_assign_interrupt, ofw_pcibus_assign_interrupt), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, ofw_pcibus_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; static devclass_t pci_devclass; DEFINE_CLASS_1(pci, ofw_pcibus_driver, ofw_pcibus_methods, sizeof(struct pci_softc), pci_driver); 
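/*
 * The bus_child_pnpinfo_str -> bus_child_pnpinfo conversion above replaces
 * the fixed-size string buffer with a caller-supplied sbuf.  As a minimal
 * sketch of what an implementation of the new method looks like (the
 * "mybus" names and ivar layout here are hypothetical, invented purely for
 * illustration; sbuf_printf() and device_get_ivars() are the real KPIs):
 *
 *	struct mybus_ivars {
 *		uint16_t miv_vendor;
 *		uint16_t miv_device;
 *	};
 *
 *	static int
 *	mybus_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb)
 *	{
 *		struct mybus_ivars *iv = device_get_ivars(child);
 *
 *		sbuf_printf(sb, "vendor=0x%04x device=0x%04x",
 *		    iv->miv_vendor, iv->miv_device);
 *		return (0);
 *	}
 *
 * The caller owns the sbuf end to end: the sysctl handler earlier in this
 * change creates it with sbuf_new(), finishes it with sbuf_finish(), and
 * appends the separating NUL bytes itself, so method implementations only
 * ever append their key=value pairs.
 */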
EARLY_DRIVER_MODULE(ofw_pcibus, pcib, ofw_pcibus_driver, pci_devclass, 0, 0, BUS_PASS_BUS); MODULE_VERSION(ofw_pcibus, 1); MODULE_DEPEND(ofw_pcibus, pci, 1, 1, 1); static int ofw_devices_only = 0; TUNABLE_INT("hw.pci.ofw_devices_only", &ofw_devices_only); static int ofw_pcibus_probe(device_t dev) { if (ofw_bus_get_node(dev) == -1) return (ENXIO); device_set_desc(dev, "OFW PCI bus"); return (BUS_PROBE_DEFAULT); } static int ofw_pcibus_attach(device_t dev) { u_int busno, domain; int error; error = pci_attach_common(dev); if (error) return (error); domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); /* * Attach those children represented in the device tree. */ ofw_pcibus_enum_devtree(dev, domain, busno); /* * We now attach any laggard devices. FDT, for instance, allows * the device tree to enumerate only some PCI devices. Apple's * OF device tree on some Grackle-based hardware can also miss * functions on multi-function cards. */ if (!ofw_devices_only) ofw_pcibus_enum_bus(dev, domain, busno); return (bus_generic_attach(dev)); } struct pci_devinfo * ofw_pcibus_alloc_devinfo(device_t dev) { struct ofw_pcibus_devinfo *dinfo; dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO); return (&dinfo->opd_dinfo); } static void ofw_pcibus_enum_devtree(device_t dev, u_int domain, u_int busno) { device_t pcib; struct ofw_pci_register pcir; struct ofw_pcibus_devinfo *dinfo; phandle_t node, child; u_int func, slot; int intline; pcib = device_get_parent(dev); node = ofw_bus_get_node(dev); for (child = OF_child(node); child != 0; child = OF_peer(child)) { if (OF_getencprop(child, "reg", (pcell_t *)&pcir, sizeof(pcir)) == -1) continue; slot = OFW_PCI_PHYS_HI_DEVICE(pcir.phys_hi); func = OFW_PCI_PHYS_HI_FUNCTION(pcir.phys_hi); /* Some OFW device trees contain dupes. */ if (pci_find_dbsf(domain, busno, slot, func) != NULL) continue; /* * The preset in the intline register is usually bogus. Reset * it such that the PCI code will reroute the interrupt if * needed. */ intline = PCI_INVALID_IRQ; if (OF_getproplen(child, "interrupts") > 0) intline = 0; PCIB_WRITE_CONFIG(pcib, busno, slot, func, PCIR_INTLINE, intline, 1); /* * Now set up the PCI and OFW bus layer devinfo and add it * to the PCI bus. */ dinfo = (struct ofw_pcibus_devinfo *)pci_read_device(pcib, dev, domain, busno, slot, func); if (dinfo == NULL) continue; if (ofw_bus_gen_setup_devinfo(&dinfo->opd_obdinfo, child) != 0) { pci_freecfg((struct pci_devinfo *)dinfo); continue; } dinfo->opd_dma_tag = NULL; pci_add_child(dev, (struct pci_devinfo *)dinfo); /* * Some devices don't have an intpin set, but do have * interrupts. These are fully specified, and set in the * interrupts property, so add that value to the device's * resource list. */ if (dinfo->opd_dinfo.cfg.intpin == 0) ofw_bus_intr_to_rl(dev, child, &dinfo->opd_dinfo.resources, NULL); } } /* * The following is an almost exact clone of pci_add_children(), with the * addition that it (a) will not add children that have already been added, * and (b) will set up the OFW devinfo to point to invalid values. This is * to handle non-enumerated PCI children as exist in FDT and on the second * function of the Rage 128 in my Blue & White G3. 
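 * (The invalid values, e.g. obd_node = -1 as set below, are what later
 * code such as ofw_pcibus_assign_interrupt() keys on to recognize
 * children that were not enumerated from the firmware tree.)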
*/ static void ofw_pcibus_enum_bus(device_t dev, u_int domain, u_int busno) { device_t pcib; struct ofw_pcibus_devinfo *dinfo; int maxslots; int s, f, pcifunchigh; uint8_t hdrtype; pcib = device_get_parent(dev); maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++) { pcifunchigh = 0; f = 0; DELAY(1); hdrtype = PCIB_READ_CONFIG(pcib, busno, s, f, PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCI_FUNCMAX; for (f = 0; f <= pcifunchigh; f++) { /* Filter devices we have already added */ if (pci_find_dbsf(domain, busno, s, f) != NULL) continue; dinfo = (struct ofw_pcibus_devinfo *)pci_read_device( pcib, dev, domain, busno, s, f); if (dinfo == NULL) continue; dinfo->opd_dma_tag = NULL; dinfo->opd_obdinfo.obd_node = -1; dinfo->opd_obdinfo.obd_name = NULL; dinfo->opd_obdinfo.obd_compat = NULL; dinfo->opd_obdinfo.obd_type = NULL; dinfo->opd_obdinfo.obd_model = NULL; /* * For non OFW-devices, don't believe 0 * for an interrupt. */ if (dinfo->opd_dinfo.cfg.intline == 0) { dinfo->opd_dinfo.cfg.intline = PCI_INVALID_IRQ; PCIB_WRITE_CONFIG(pcib, busno, s, f, PCIR_INTLINE, PCI_INVALID_IRQ, 1); } pci_add_child(dev, (struct pci_devinfo *)dinfo); } } } static void ofw_pcibus_child_deleted(device_t dev, device_t child) { struct ofw_pcibus_devinfo *dinfo; dinfo = device_get_ivars(child); ofw_bus_gen_destroy_devinfo(&dinfo->opd_obdinfo); pci_child_deleted(dev, child); } static int -ofw_pcibus_child_pnpinfo_str_method(device_t cbdev, device_t child, char *buf, - size_t buflen) +ofw_pcibus_child_pnpinfo_method(device_t cbdev, device_t child, struct sbuf *sb) { - pci_child_pnpinfo_str_method(cbdev, child, buf, buflen); + pci_child_pnpinfo_method(cbdev, child, sb); if (ofw_bus_get_node(child) != -1) { - strlcat(buf, " ", buflen); /* Separate info */ - ofw_bus_gen_child_pnpinfo_str(cbdev, child, buf, buflen); + sbuf_cat(sb, " "); /* Separate info */ + ofw_bus_gen_child_pnpinfo(cbdev, child, sb); } return (0); } static int ofw_pcibus_assign_interrupt(device_t dev, device_t child) { ofw_pci_intr_t intr[2]; phandle_t node, iparent; int isz, icells; node = ofw_bus_get_node(child); if (node == -1) { /* Non-firmware enumerated child, use standard routing */ intr[0] = pci_get_intpin(child); return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child, intr[0])); } /* * Try to determine the node's interrupt parent so we know which * PIC to use. */ iparent = -1; if (OF_getencprop(node, "interrupt-parent", &iparent, sizeof(iparent)) < 0) iparent = -1; icells = 1; if (iparent != -1) OF_getencprop(OF_node_from_xref(iparent), "#interrupt-cells", &icells, sizeof(icells)); /* * Any AAPL,interrupts property gets priority and is * fully specified (i.e. does not need routing) */ isz = OF_getencprop(node, "AAPL,interrupts", intr, sizeof(intr)); if (isz == sizeof(intr[0])*icells) return ((iparent == -1) ? intr[0] : ofw_bus_map_intr(dev, iparent, icells, intr)); isz = OF_getencprop(node, "interrupts", intr, sizeof(intr)); if (isz == sizeof(intr[0])*icells) { if (iparent != -1) intr[0] = ofw_bus_map_intr(dev, iparent, icells, intr); } else { /* No property: our best guess is the intpin. */ intr[0] = pci_get_intpin(child); } /* * If we got intr from a property, it may or may not be an intpin. * For on-board devices, it frequently is not, and is completely out * of the valid intpin range. For PCI slots, it hopefully is, * otherwise we will have trouble interfacing with non-OFW buses * such as cardbus. 
* Since we cannot tell which it is without violating layering, we * will always use the route_interrupt method, and treat exceptions * on the level they become apparent. */ return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child, intr[0])); } static const struct ofw_bus_devinfo * ofw_pcibus_get_devinfo(device_t bus, device_t dev) { struct ofw_pcibus_devinfo *dinfo; dinfo = device_get_ivars(dev); return (&dinfo->opd_obdinfo); } int ofw_pcibus_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { int d, error; d = platform_node_numa_domain(ofw_bus_get_node(dev)); switch (op) { case LOCAL_CPUS: if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = cpuset_domain[d]; return (0); case INTR_CPUS: error = bus_generic_get_cpus(dev, child, op, setsize, cpuset); if (error != 0) return (error); if (setsize != sizeof(cpuset_t)) return (EINVAL); CPU_AND(cpuset, &cpuset_domain[d]); return (0); default: return (bus_generic_get_cpus(dev, child, op, setsize, cpuset)); } return (0); } /* * Fetch the NUMA domain for the given device 'dev'. * * If a device has a _PXM method, map that to a NUMA domain. * Otherwise, pass the request up to the parent. * If there's no matching domain or the domain cannot be * determined, return ENOENT. */ int ofw_pcibus_get_domain(device_t dev, device_t child, int *domain) { *domain = platform_node_numa_domain(ofw_bus_get_node(child)); return (0); } diff --git a/sys/powerpc/powermac/macgpio.c b/sys/powerpc/powermac/macgpio.c index 68eb07e7b391..a82888435163 100644 --- a/sys/powerpc/powermac/macgpio.c +++ b/sys/powerpc/powermac/macgpio.c @@ -1,401 +1,401 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright 2008 by Nathan Whitehorn. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ /* * Driver for MacIO GPIO controller */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Macgpio softc */ struct macgpio_softc { phandle_t sc_node; struct resource *sc_gpios; int sc_gpios_rid; uint32_t sc_saved_gpio_levels[2]; uint32_t sc_saved_gpios[GPIO_COUNT]; uint32_t sc_saved_extint_gpios[GPIO_EXTINT_COUNT]; }; static MALLOC_DEFINE(M_MACGPIO, "macgpio", "macgpio device information"); static int macgpio_probe(device_t); static int macgpio_attach(device_t); static int macgpio_print_child(device_t dev, device_t child); static void macgpio_probe_nomatch(device_t, device_t); static struct resource *macgpio_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int macgpio_activate_resource(device_t, device_t, int, int, struct resource *); static int macgpio_deactivate_resource(device_t, device_t, int, int, struct resource *); static ofw_bus_get_devinfo_t macgpio_get_devinfo; static int macgpio_suspend(device_t dev); static int macgpio_resume(device_t dev); /* * Bus interface definition */ static device_method_t macgpio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, macgpio_probe), DEVMETHOD(device_attach, macgpio_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, macgpio_suspend), DEVMETHOD(device_resume, macgpio_resume), /* Bus interface */ DEVMETHOD(bus_print_child, macgpio_print_child), DEVMETHOD(bus_probe_nomatch, macgpio_probe_nomatch), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, macgpio_alloc_resource), DEVMETHOD(bus_activate_resource, macgpio_activate_resource), DEVMETHOD(bus_deactivate_resource, macgpio_deactivate_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), - DEVMETHOD(bus_child_pnpinfo_str, ofw_bus_gen_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, macgpio_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t macgpio_pci_driver = { "macgpio", macgpio_methods, sizeof(struct macgpio_softc) }; devclass_t macgpio_devclass; EARLY_DRIVER_MODULE(macgpio, macio, macgpio_pci_driver, macgpio_devclass, 0, 0, BUS_PASS_BUS); struct macgpio_devinfo { struct ofw_bus_devinfo mdi_obdinfo; struct resource_list mdi_resources; int gpio_num; }; static int macgpio_probe(device_t dev) { const char *name; name = ofw_bus_get_name(dev); if (name && strcmp(name, "gpio") == 0) { device_set_desc(dev, "MacIO GPIO Controller"); return (0); } return (ENXIO); } /* * Scan Open Firmware child nodes, and attach these as children * of the macgpio bus */ static int macgpio_attach(device_t dev) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; phandle_t root, child, iparent; device_t cdev; uint32_t irq[2]; sc = device_get_softc(dev); root = sc->sc_node = ofw_bus_get_node(dev); sc->sc_gpios = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_gpios_rid, RF_ACTIVE); /* * Iterate through the sub-devices */ for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), 
M_MACGPIO, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->mdi_obdinfo, child) != 0) { free(dinfo, M_MACGPIO); continue; } if (OF_getencprop(child, "reg", &dinfo->gpio_num, sizeof(dinfo->gpio_num)) != sizeof(dinfo->gpio_num)) { /* * Some early GPIO controllers don't provide GPIO * numbers for GPIOs designed only to provide * interrupt resources. We should still allow these * to attach, but with caution. */ dinfo->gpio_num = -1; } resource_list_init(&dinfo->mdi_resources); if (OF_getencprop(child, "interrupts", irq, sizeof(irq)) == sizeof(irq)) { OF_searchencprop(child, "interrupt-parent", &iparent, sizeof(iparent)); resource_list_add(&dinfo->mdi_resources, SYS_RES_IRQ, 0, MAP_IRQ(iparent, irq[0]), MAP_IRQ(iparent, irq[0]), 1); } /* Fix messed-up offsets */ if (dinfo->gpio_num > 0x50) dinfo->gpio_num -= 0x50; cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->mdi_obdinfo.obd_name); ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_MACGPIO); continue; } device_set_ivars(cdev, dinfo); } return (bus_generic_attach(dev)); } static int macgpio_print_child(device_t dev, device_t child) { struct macgpio_devinfo *dinfo; int retval = 0; dinfo = device_get_ivars(child); retval += bus_print_child_header(dev, child); if (dinfo->gpio_num >= GPIO_BASE) printf(" gpio %d", dinfo->gpio_num - GPIO_BASE); else if (dinfo->gpio_num >= GPIO_EXTINT_BASE) printf(" extint-gpio %d", dinfo->gpio_num - GPIO_EXTINT_BASE); else if (dinfo->gpio_num >= 0) printf(" addr 0x%02x", dinfo->gpio_num); /* should not happen */ resource_list_print_type(&dinfo->mdi_resources, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void macgpio_probe_nomatch(device_t dev, device_t child) { struct macgpio_devinfo *dinfo; const char *type; if (bootverbose) { dinfo = device_get_ivars(child); if ((type = ofw_bus_get_type(child)) == NULL) type = "(unknown)"; device_printf(dev, "<%s, %s>", type, ofw_bus_get_name(child)); if (dinfo->gpio_num >= 0) printf(" gpio %d",dinfo->gpio_num); resource_list_print_type(&dinfo->mdi_resources, "irq", SYS_RES_IRQ, "%jd"); printf(" (no driver attached)\n"); } } static struct resource * macgpio_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct macgpio_devinfo *dinfo; dinfo = device_get_ivars(child); if (type != SYS_RES_IRQ) return (NULL); return (resource_list_alloc(&dinfo->mdi_resources, bus, child, type, rid, start, end, count, flags)); } static int macgpio_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; u_char val; sc = device_get_softc(bus); dinfo = device_get_ivars(child); if (type != SYS_RES_IRQ) return ENXIO; if (dinfo->gpio_num >= 0) { val = bus_read_1(sc->sc_gpios,dinfo->gpio_num); val |= 0x80; bus_write_1(sc->sc_gpios,dinfo->gpio_num,val); } return (bus_activate_resource(bus, type, rid, res)); } static int macgpio_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; u_char val; sc = device_get_softc(bus); dinfo = device_get_ivars(child); if (type != SYS_RES_IRQ) return ENXIO; if (dinfo->gpio_num >= 0) { val = bus_read_1(sc->sc_gpios,dinfo->gpio_num); val &= ~0x80; bus_write_1(sc->sc_gpios,dinfo->gpio_num,val); } return (bus_deactivate_resource(bus, type, rid, res)); } uint8_t 
macgpio_read(device_t dev) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; sc = device_get_softc(device_get_parent(dev)); dinfo = device_get_ivars(dev); if (dinfo->gpio_num < 0) return (0); return (bus_read_1(sc->sc_gpios,dinfo->gpio_num)); } void macgpio_write(device_t dev, uint8_t val) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; sc = device_get_softc(device_get_parent(dev)); dinfo = device_get_ivars(dev); if (dinfo->gpio_num < 0) return; bus_write_1(sc->sc_gpios,dinfo->gpio_num,val); } static const struct ofw_bus_devinfo * macgpio_get_devinfo(device_t dev, device_t child) { struct macgpio_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_obdinfo); } static int macgpio_suspend(device_t dev) { struct macgpio_softc *sc; int i; sc = device_get_softc(dev); sc->sc_saved_gpio_levels[0] = bus_read_4(sc->sc_gpios, GPIO_LEVELS_0); sc->sc_saved_gpio_levels[1] = bus_read_4(sc->sc_gpios, GPIO_LEVELS_1); for (i = 0; i < GPIO_COUNT; i++) sc->sc_saved_gpios[i] = bus_read_1(sc->sc_gpios, GPIO_BASE + i); for (i = 0; i < GPIO_EXTINT_COUNT; i++) sc->sc_saved_extint_gpios[i] = bus_read_1(sc->sc_gpios, GPIO_EXTINT_BASE + i); return (0); } static int macgpio_resume(device_t dev) { struct macgpio_softc *sc; int i; sc = device_get_softc(dev); bus_write_4(sc->sc_gpios, GPIO_LEVELS_0, sc->sc_saved_gpio_levels[0]); bus_write_4(sc->sc_gpios, GPIO_LEVELS_1, sc->sc_saved_gpio_levels[1]); for (i = 0; i < GPIO_COUNT; i++) bus_write_1(sc->sc_gpios, GPIO_BASE + i, sc->sc_saved_gpios[i]); for (i = 0; i < GPIO_EXTINT_COUNT; i++) bus_write_1(sc->sc_gpios, GPIO_EXTINT_BASE + i, sc->sc_saved_extint_gpios[i]); return (0); } diff --git a/sys/powerpc/powermac/macio.c b/sys/powerpc/powermac/macio.c index 35ee90fb75b5..b475277be816 100644 --- a/sys/powerpc/powermac/macio.c +++ b/sys/powerpc/powermac/macio.c @@ -1,695 +1,695 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright 2002 by Peter Grehan. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Driver for KeyLargo/Pangea, the MacPPC south bridge ASIC. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Macio softc */ struct macio_softc { phandle_t sc_node; vm_offset_t sc_base; vm_offset_t sc_size; struct rman sc_mem_rman; /* FCR registers */ int sc_memrid; struct resource *sc_memr; }; static MALLOC_DEFINE(M_MACIO, "macio", "macio device information"); static int macio_probe(device_t); static int macio_attach(device_t); static int macio_print_child(device_t dev, device_t child); static void macio_probe_nomatch(device_t, device_t); static struct resource *macio_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int macio_activate_resource(device_t, device_t, int, int, struct resource *); static int macio_deactivate_resource(device_t, device_t, int, int, struct resource *); static int macio_release_resource(device_t, device_t, int, int, struct resource *); static struct resource_list *macio_get_resource_list (device_t, device_t); static ofw_bus_get_devinfo_t macio_get_devinfo; /* * Bus interface definition */ static device_method_t macio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, macio_probe), DEVMETHOD(device_attach, macio_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, macio_print_child), DEVMETHOD(bus_probe_nomatch, macio_probe_nomatch), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, macio_alloc_resource), DEVMETHOD(bus_release_resource, macio_release_resource), DEVMETHOD(bus_activate_resource, macio_activate_resource), DEVMETHOD(bus_deactivate_resource, macio_deactivate_resource), DEVMETHOD(bus_get_resource_list, macio_get_resource_list), - DEVMETHOD(bus_child_pnpinfo_str, ofw_bus_gen_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, macio_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t macio_pci_driver = { "macio", macio_methods, sizeof(struct macio_softc) }; devclass_t macio_devclass; EARLY_DRIVER_MODULE(macio, pci, macio_pci_driver, macio_devclass, 0, 0, BUS_PASS_BUS); /* * PCI ID search table */ static struct macio_pci_dev { u_int32_t mpd_devid; char *mpd_desc; } macio_pci_devlist[] = { { 0x0017106b, "Paddington I/O Controller" }, { 0x0022106b, "KeyLargo I/O Controller" }, { 0x0025106b, "Pangea I/O Controller" }, { 0x003e106b, "Intrepid I/O Controller" }, { 0x0041106b, "K2 KeyLargo I/O Controller" }, { 0x004f106b, "Shasta I/O Controller" }, { 0, NULL } }; /* * Devices to exclude from the probe * XXX some of these may be required in the future... 
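 * (macio_quirks[] below maps node names onto MACIO_QUIRK_* masks; nodes
 * matching a MACIO_QUIRK_IGNORE entry are skipped outright in
 * macio_attach().)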
*/ #define MACIO_QUIRK_IGNORE 0x00000001 #define MACIO_QUIRK_CHILD_HAS_INTR 0x00000002 #define MACIO_QUIRK_USE_CHILD_REG 0x00000004 struct macio_quirk_entry { const char *mq_name; int mq_quirks; }; static struct macio_quirk_entry macio_quirks[] = { { "escc-legacy", MACIO_QUIRK_IGNORE }, { "timer", MACIO_QUIRK_IGNORE }, { "escc", MACIO_QUIRK_CHILD_HAS_INTR }, { "i2s", MACIO_QUIRK_CHILD_HAS_INTR | MACIO_QUIRK_USE_CHILD_REG }, { NULL, 0 } }; static int macio_get_quirks(const char *name) { struct macio_quirk_entry *mqe; for (mqe = macio_quirks; mqe->mq_name != NULL; mqe++) if (strcmp(name, mqe->mq_name) == 0) return (mqe->mq_quirks); return (0); } /* * Add an interrupt to the dev's resource list if present */ static void macio_add_intr(phandle_t devnode, struct macio_devinfo *dinfo) { phandle_t iparent; int *intr; int i, nintr; int icells; if (dinfo->mdi_ninterrupts >= 6) { printf("macio: device has more than 6 interrupts\n"); return; } nintr = OF_getprop_alloc_multi(devnode, "interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) { nintr = OF_getprop_alloc_multi(devnode, "AAPL,interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) return; } if (intr[0] == -1) return; if (OF_getprop(devnode, "interrupt-parent", &iparent, sizeof(iparent)) <= 0) panic("Interrupt but no interrupt parent!\n"); if (OF_getprop(OF_node_from_xref(iparent), "#interrupt-cells", &icells, sizeof(icells)) <= 0) icells = 1; for (i = 0; i < nintr; i+=icells) { u_int irq = MAP_IRQ(iparent, intr[i]); resource_list_add(&dinfo->mdi_resources, SYS_RES_IRQ, dinfo->mdi_ninterrupts, irq, irq, 1); dinfo->mdi_interrupts[dinfo->mdi_ninterrupts] = irq; dinfo->mdi_ninterrupts++; } } static void macio_add_reg(phandle_t devnode, struct macio_devinfo *dinfo) { struct macio_reg *reg, *regp; phandle_t child; char buf[8]; int i, layout_id = 0, nreg, res; nreg = OF_getprop_alloc_multi(devnode, "reg", sizeof(*reg), (void **)&reg); if (nreg == -1) return; /* * Some G5's have broken properties in the i2s-a area. If so we try * to fix it. Right now we know of two different cases, one for * sound layout-id 36 and the other one for sound layout-id 76. * What is missing is the base address for the memory addresses. * We take them from the parent node (i2s) and use the size * information from the child.
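 * (Concretely, the fix-up below copies reg[0] verbatim from the parent,
 * takes the parent's second base address for reg[1], and then derives
 * reg[2]'s base as reg[1].mr_base + reg[1].mr_size.)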
*/ if (reg[0].mr_base == 0) { child = OF_child(devnode); while (child != 0) { res = OF_getprop(child, "name", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "sound") == 0) break; child = OF_peer(child); } res = OF_getprop(child, "layout-id", &layout_id, sizeof(layout_id)); if (res > 0 && (layout_id == 36 || layout_id == 76)) { res = OF_getprop_alloc_multi(OF_parent(devnode), "reg", sizeof(*regp), (void **)&regp); reg[0] = regp[0]; reg[1].mr_base = regp[1].mr_base; reg[2].mr_base = regp[1].mr_base + reg[1].mr_size; } } for (i = 0; i < nreg; i++) { resource_list_add(&dinfo->mdi_resources, SYS_RES_MEMORY, i, reg[i].mr_base, reg[i].mr_base + reg[i].mr_size, reg[i].mr_size); } }
/* * PCI probe */ static int macio_probe(device_t dev) { int i; u_int32_t devid; devid = pci_get_devid(dev); for (i = 0; macio_pci_devlist[i].mpd_desc != NULL; i++) { if (devid == macio_pci_devlist[i].mpd_devid) { device_set_desc(dev, macio_pci_devlist[i].mpd_desc); return (0); } } return (ENXIO); }
/* * PCI attach: scan Open Firmware child nodes, and attach these as children * of the macio bus */ static int macio_attach(device_t dev) { struct macio_softc *sc; struct macio_devinfo *dinfo; phandle_t root; phandle_t child; phandle_t subchild; device_t cdev; u_int reg[3]; char compat[32]; int error, quirks; sc = device_get_softc(dev); root = sc->sc_node = ofw_bus_get_node(dev); /* * Locate the device node and its base address */ if (OF_getprop(root, "assigned-addresses", reg, sizeof(reg)) < (ssize_t)sizeof(reg)) { return (ENXIO); } /* Used later to see if we have to enable the I2S part. */ OF_getprop(root, "compatible", compat, sizeof(compat)); sc->sc_base = reg[2]; sc->sc_size = MACIO_REG_SIZE; sc->sc_memrid = PCIR_BAR(0); sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_memrid, RF_ACTIVE); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "MacIO Device Memory"; error = rman_init(&sc->sc_mem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); return (error); } error = rman_manage_region(&sc->sc_mem_rman, 0, sc->sc_size); if (error) { device_printf(dev, "rman_manage_region() failed.
error = %d\n", error); return (error); } /* * Iterate through the sub-devices */ for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_MACIO, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->mdi_obdinfo, child) != 0) { free(dinfo, M_MACIO); continue; } quirks = macio_get_quirks(dinfo->mdi_obdinfo.obd_name); if ((quirks & MACIO_QUIRK_IGNORE) != 0) { ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_MACIO); continue; } resource_list_init(&dinfo->mdi_resources); dinfo->mdi_ninterrupts = 0; macio_add_intr(child, dinfo); if ((quirks & MACIO_QUIRK_USE_CHILD_REG) != 0) macio_add_reg(OF_child(child), dinfo); else macio_add_reg(child, dinfo); if ((quirks & MACIO_QUIRK_CHILD_HAS_INTR) != 0) for (subchild = OF_child(child); subchild != 0; subchild = OF_peer(subchild)) macio_add_intr(subchild, dinfo); cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->mdi_obdinfo.obd_name); resource_list_free(&dinfo->mdi_resources); ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_MACIO); continue; } device_set_ivars(cdev, dinfo); /* Set FCRs to enable some devices */ if (sc->sc_memr == NULL) continue; if (strcmp(ofw_bus_get_name(cdev), "bmac") == 0 || (ofw_bus_get_compat(cdev) != NULL && strcmp(ofw_bus_get_compat(cdev), "bmac+") == 0)) { uint32_t fcr; fcr = bus_read_4(sc->sc_memr, HEATHROW_FCR); fcr |= FCR_ENET_ENABLE & ~FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); fcr |= FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); fcr &= ~FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); } /* * Make sure the I2S0 and the I2S0_CLK are enabled. * On certain G5's they are not. 
*/ if ((strcmp(ofw_bus_get_name(cdev), "i2s") == 0) && (strcmp(compat, "K2-Keylargo") == 0)) { uint32_t fcr1; fcr1 = bus_read_4(sc->sc_memr, KEYLARGO_FCR1); fcr1 |= FCR1_I2S0_CLK_ENABLE | FCR1_I2S0_ENABLE; bus_write_4(sc->sc_memr, KEYLARGO_FCR1, fcr1); } } return (bus_generic_attach(dev)); } static int macio_print_child(device_t dev, device_t child) { struct macio_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->mdi_resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void macio_probe_nomatch(device_t dev, device_t child) { struct macio_devinfo *dinfo; struct resource_list *rl; const char *type; if (bootverbose) { dinfo = device_get_ivars(child); rl = &dinfo->mdi_resources; if ((type = ofw_bus_get_type(child)) == NULL) type = "(unknown)"; device_printf(dev, "<%s, %s>", type, ofw_bus_get_name(child)); resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); printf(" (no driver attached)\n"); } } static struct resource * macio_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct macio_softc *sc; int needactivate; struct resource *rv; struct rman *rm; u_long adjstart, adjend, adjcount; struct macio_devinfo *dinfo; struct resource_list_entry *rle; sc = device_get_softc(bus); dinfo = device_get_ivars(child); needactivate = flags & RF_ACTIVE; flags &= ~RF_ACTIVE; switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rle = resource_list_find(&dinfo->mdi_resources, SYS_RES_MEMORY, *rid); if (rle == NULL) { device_printf(bus, "no rle for %s memory %d\n", device_get_nameunit(child), *rid); return (NULL); } if (start < rle->start) adjstart = rle->start; else if (start > rle->end) adjstart = rle->end; else adjstart = start; if (end < rle->start) adjend = rle->start; else if (end > rle->end) adjend = rle->end; else adjend = end; adjcount = adjend - adjstart; rm = &sc->sc_mem_rman; break; case SYS_RES_IRQ: /* Check for passthrough from subattachments like macgpio */ if (device_get_parent(child) != bus) return BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); rle = resource_list_find(&dinfo->mdi_resources, SYS_RES_IRQ, *rid); if (rle == NULL) { if (dinfo->mdi_ninterrupts >= 6) { device_printf(bus, "%s has more than 6 interrupts\n", device_get_nameunit(child)); return (NULL); } resource_list_add(&dinfo->mdi_resources, SYS_RES_IRQ, dinfo->mdi_ninterrupts, start, start, 1); dinfo->mdi_interrupts[dinfo->mdi_ninterrupts] = start; dinfo->mdi_ninterrupts++; } return (resource_list_alloc(&dinfo->mdi_resources, bus, child, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } rv = rman_reserve_resource(rm, adjstart, adjend, adjcount, flags, child); if (rv == NULL) { device_printf(bus, "failed to reserve resource %#lx - %#lx (%#lx) for %s\n", adjstart, adjend, adjcount, device_get_nameunit(child)); return (NULL); } rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv) != 0) { device_printf(bus, "failed to activate resource for %s\n", device_get_nameunit(child)); rman_release_resource(rv); return (NULL); } } return (rv); } static 
int macio_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { if (rman_get_flags(res) & RF_ACTIVE) { int error = bus_deactivate_resource(child, type, rid, res); if (error) return error; } return (rman_release_resource(res)); } static int macio_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { struct macio_softc *sc; void *p; sc = device_get_softc(bus); if (type == SYS_RES_IRQ) return (bus_activate_resource(bus, type, rid, res)); if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { p = pmap_mapdev((vm_offset_t)rman_get_start(res) + sc->sc_base, (vm_size_t)rman_get_size(res)); if (p == NULL) return (ENOMEM); rman_set_virtual(res, p); rman_set_bustag(res, &bs_le_tag); rman_set_bushandle(res, (u_long)p); } return (rman_activate_resource(res)); } static int macio_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { /* * If this is a memory resource, unmap it. */ if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { u_int32_t psize; psize = rman_get_size(res); pmap_unmapdev((vm_offset_t)rman_get_virtual(res), psize); } return (rman_deactivate_resource(res)); } static struct resource_list * macio_get_resource_list (device_t dev, device_t child) { struct macio_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_resources); } static const struct ofw_bus_devinfo * macio_get_devinfo(device_t dev, device_t child) { struct macio_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_obdinfo); } int macio_enable_wireless(device_t dev, bool enable) { struct macio_softc *sc = device_get_softc(dev); uint32_t x; if (enable) { x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x |= 0x4; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* Enable card slot. */ bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0f, 5); DELAY(1000); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0f, 4); DELAY(1000); x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x &= ~0x80000000; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* out8(gpio + 0x10, 4); */ bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0b, 0); bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0a, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0d, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0d, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0e, 0x28); bus_write_4(sc->sc_memr, 0x1c000, 0); /* Initialize the card. */ bus_write_4(sc->sc_memr, 0x1a3e0, 0x41); x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x |= 0x80000000; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); } else { x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x &= ~0x4; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* out8(gpio + 0x10, 0); */ } return (0); } diff --git a/sys/powerpc/powermac/smu.c b/sys/powerpc/powermac/smu.c index 6c0badec2ec2..2981b89c59f2 100644 --- a/sys/powerpc/powermac/smu.c +++ b/sys/powerpc/powermac/smu.c @@ -1,1582 +1,1582 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "clock_if.h" #include "iicbus_if.h" struct smu_cmd { volatile uint8_t cmd; uint8_t len; uint8_t data[254]; STAILQ_ENTRY(smu_cmd) cmd_q; }; STAILQ_HEAD(smu_cmdq, smu_cmd); struct smu_fan { struct pmac_fan fan; device_t dev; cell_t reg; enum { SMU_FAN_RPM, SMU_FAN_PWM } type; int setpoint; int old_style; int rpm; }; /* We can read the PWM and the RPM from a PWM controlled fan. * Offer both values via sysctl. */ enum { SMU_PWM_SYSCTL_PWM = 1 << 8, SMU_PWM_SYSCTL_RPM = 2 << 8 }; struct smu_sensor { struct pmac_therm therm; device_t dev; cell_t reg; enum { SMU_CURRENT_SENSOR, SMU_VOLTAGE_SENSOR, SMU_POWER_SENSOR, SMU_TEMP_SENSOR } type; }; struct smu_softc { device_t sc_dev; struct mtx sc_mtx; struct resource *sc_memr; int sc_memrid; int sc_u3; bus_dma_tag_t sc_dmatag; bus_space_tag_t sc_bt; bus_space_handle_t sc_mailbox; struct smu_cmd *sc_cmd, *sc_cur_cmd; bus_addr_t sc_cmd_phys; bus_dmamap_t sc_cmd_dmamap; struct smu_cmdq sc_cmdq; struct smu_fan *sc_fans; int sc_nfans; int old_style_fans; struct smu_sensor *sc_sensors; int sc_nsensors; int sc_doorbellirqid; struct resource *sc_doorbellirq; void *sc_doorbellirqcookie; struct proc *sc_fanmgt_proc; time_t sc_lastuserchange; /* Calibration data */ uint16_t sc_cpu_diode_scale; int16_t sc_cpu_diode_offset; uint16_t sc_cpu_volt_scale; int16_t sc_cpu_volt_offset; uint16_t sc_cpu_curr_scale; int16_t sc_cpu_curr_offset; uint16_t sc_slots_pow_scale; int16_t sc_slots_pow_offset; struct cdev *sc_leddev; }; /* regular bus attachment functions */ static int smu_probe(device_t); static int smu_attach(device_t); static const struct ofw_bus_devinfo * smu_get_devinfo(device_t bus, device_t dev); /* cpufreq notification hooks */ static void smu_cpufreq_pre_change(device_t, const struct cf_level *level); static void smu_cpufreq_post_change(device_t, const struct cf_level *level); /* clock interface */ static int smu_gettime(device_t dev, struct timespec *ts); static int smu_settime(device_t dev, struct timespec *ts); /* utility functions */ static int smu_run_cmd(device_t dev, struct smu_cmd *cmd, int wait); static int smu_get_datablock(device_t dev, int8_t id, uint8_t *buf, size_t len); static void smu_attach_i2c(device_t dev, phandle_t i2croot); static void smu_attach_fans(device_t dev, phandle_t fanroot); static void smu_attach_sensors(device_t dev, 
phandle_t sensroot); static void smu_set_sleepled(void *xdev, int onoff); static int smu_server_mode(SYSCTL_HANDLER_ARGS); static void smu_doorbell_intr(void *xdev); static void smu_shutdown(void *xdev, int howto); /* where to find the doorbell GPIO */ static device_t smu_doorbell = NULL; static device_method_t smu_methods[] = { /* Device interface */ DEVMETHOD(device_probe, smu_probe), DEVMETHOD(device_attach, smu_attach), /* Clock interface */ DEVMETHOD(clock_gettime, smu_gettime), DEVMETHOD(clock_settime, smu_settime), /* ofw_bus interface */ - DEVMETHOD(bus_child_pnpinfo_str,ofw_bus_gen_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(ofw_bus_get_devinfo, smu_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 }, }; static driver_t smu_driver = { "smu", smu_methods, sizeof(struct smu_softc) }; static devclass_t smu_devclass; DRIVER_MODULE(smu, ofwbus, smu_driver, smu_devclass, 0, 0); static MALLOC_DEFINE(M_SMU, "smu", "SMU Sensor Information"); #define SMU_MAILBOX 0x8000860c #define SMU_FANMGT_INTERVAL 1000 /* ms */ /* Command types */ #define SMU_ADC 0xd8 #define SMU_FAN 0x4a #define SMU_RPM_STATUS 0x01 #define SMU_RPM_SETPOINT 0x02 #define SMU_PWM_STATUS 0x11 #define SMU_PWM_SETPOINT 0x12 #define SMU_I2C 0x9a #define SMU_I2C_SIMPLE 0x00 #define SMU_I2C_NORMAL 0x01 #define SMU_I2C_COMBINED 0x02 #define SMU_MISC 0xee #define SMU_MISC_GET_DATA 0x02 #define SMU_MISC_LED_CTRL 0x04 #define SMU_POWER 0xaa #define SMU_POWER_EVENTS 0x8f #define SMU_PWR_GET_POWERUP 0x00 #define SMU_PWR_SET_POWERUP 0x01 #define SMU_PWR_CLR_POWERUP 0x02 #define SMU_RTC 0x8e #define SMU_RTC_GET 0x81 #define SMU_RTC_SET 0x80 /* Power event types */ #define SMU_WAKEUP_KEYPRESS 0x01 #define SMU_WAKEUP_AC_INSERT 0x02 #define SMU_WAKEUP_AC_CHANGE 0x04 #define SMU_WAKEUP_RING 0x10 /* Data blocks */ #define SMU_CPUTEMP_CAL 0x18 #define SMU_CPUVOLT_CAL 0x21 #define SMU_SLOTPW_CAL 0x78 /* Partitions */ #define SMU_PARTITION 0x3e #define SMU_PARTITION_LATEST 0x01 #define SMU_PARTITION_BASE 0x02 #define SMU_PARTITION_UPDATE 0x03 static int smu_probe(device_t dev) { const char *name = ofw_bus_get_name(dev); if (strcmp(name, "smu") != 0) return (ENXIO); device_set_desc(dev, "Apple System Management Unit"); return (0); } static void smu_phys_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct smu_softc *sc = xsc; sc->sc_cmd_phys = segs[0].ds_addr; } static int smu_attach(device_t dev) { struct smu_softc *sc; phandle_t node, child; uint8_t data[12]; sc = device_get_softc(dev); mtx_init(&sc->sc_mtx, "smu", NULL, MTX_DEF); sc->sc_cur_cmd = NULL; sc->sc_doorbellirqid = -1; sc->sc_u3 = 0; if (OF_finddevice("/u3") != -1) sc->sc_u3 = 1; /* * Map the mailbox area. This should be determined from firmware, * but I have not found a simple way to do that. */ bus_dma_tag_create(NULL, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE, 1, PAGE_SIZE, 0, NULL, NULL, &(sc->sc_dmatag)); sc->sc_bt = &bs_le_tag; bus_space_map(sc->sc_bt, SMU_MAILBOX, 4, 0, &sc->sc_mailbox); /* * Allocate the command buffer. This can be anywhere in the low 4 GB * of memory. 
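 * (The bus_dma_tag_create() call above encodes this constraint by passing
 * BUS_SPACE_MAXADDR_32BIT as the tag's lowaddr bound.)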
*/ bus_dmamem_alloc(sc->sc_dmatag, (void **)&sc->sc_cmd, BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->sc_cmd_dmamap); bus_dmamap_load(sc->sc_dmatag, sc->sc_cmd_dmamap, sc->sc_cmd, PAGE_SIZE, smu_phys_callback, sc, 0); STAILQ_INIT(&sc->sc_cmdq); /* * Set up handlers to change CPU voltage when CPU frequency is changed. */ EVENTHANDLER_REGISTER(cpufreq_pre_change, smu_cpufreq_pre_change, dev, EVENTHANDLER_PRI_ANY); EVENTHANDLER_REGISTER(cpufreq_post_change, smu_cpufreq_post_change, dev, EVENTHANDLER_PRI_ANY); node = ofw_bus_get_node(dev); /* Some SMUs have RPM and PWM controlled fans which do not sit * under the same node. So we have to attach them separately. */ smu_attach_fans(dev, node); /* * Now detect and attach the other child devices. */ for (child = OF_child(node); child != 0; child = OF_peer(child)) { char name[32]; memset(name, 0, sizeof(name)); OF_getprop(child, "name", name, sizeof(name)); if (strncmp(name, "sensors", 8) == 0) smu_attach_sensors(dev, child); if (strncmp(name, "smu-i2c-control", 15) == 0) smu_attach_i2c(dev, child); } /* Some SMUs have the I2C children directly under the bus. */ smu_attach_i2c(dev, node); /* * Collect calibration constants. */ smu_get_datablock(dev, SMU_CPUTEMP_CAL, data, sizeof(data)); sc->sc_cpu_diode_scale = (data[4] << 8) + data[5]; sc->sc_cpu_diode_offset = (data[6] << 8) + data[7]; smu_get_datablock(dev, SMU_CPUVOLT_CAL, data, sizeof(data)); sc->sc_cpu_volt_scale = (data[4] << 8) + data[5]; sc->sc_cpu_volt_offset = (data[6] << 8) + data[7]; sc->sc_cpu_curr_scale = (data[8] << 8) + data[9]; sc->sc_cpu_curr_offset = (data[10] << 8) + data[11]; smu_get_datablock(dev, SMU_SLOTPW_CAL, data, sizeof(data)); sc->sc_slots_pow_scale = (data[4] << 8) + data[5]; sc->sc_slots_pow_offset = (data[6] << 8) + data[7]; /* * Set up LED interface */ sc->sc_leddev = led_create(smu_set_sleepled, dev, "sleepled"); /* * Reset on power loss behavior */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "server_mode", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, dev, 0, smu_server_mode, "I", "Enable reboot after power failure"); /* * Set up doorbell interrupt. */ sc->sc_doorbellirqid = 0; sc->sc_doorbellirq = bus_alloc_resource_any(smu_doorbell, SYS_RES_IRQ, &sc->sc_doorbellirqid, RF_ACTIVE); bus_setup_intr(smu_doorbell, sc->sc_doorbellirq, INTR_TYPE_MISC | INTR_MPSAFE, NULL, smu_doorbell_intr, dev, &sc->sc_doorbellirqcookie); powerpc_config_intr(rman_get_start(sc->sc_doorbellirq), INTR_TRIGGER_EDGE, INTR_POLARITY_LOW); /* * Connect RTC interface. 
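 * (clock_register()'s second argument is the clock's resolution in
 * microseconds, so the SMU RTC is registered with 1000 us resolution.)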
*/ clock_register(dev, 1000); /* * Learn about shutdown events */ EVENTHANDLER_REGISTER(shutdown_final, smu_shutdown, dev, SHUTDOWN_PRI_LAST); return (bus_generic_attach(dev)); } static const struct ofw_bus_devinfo * smu_get_devinfo(device_t bus, device_t dev) { return (device_get_ivars(dev)); } static void smu_send_cmd(device_t dev, struct smu_cmd *cmd) { struct smu_softc *sc; sc = device_get_softc(dev); mtx_assert(&sc->sc_mtx, MA_OWNED); if (sc->sc_u3) powerpc_pow_enabled = 0; /* SMU cannot work if we go to NAP */ sc->sc_cur_cmd = cmd; /* Copy the command to the mailbox */ sc->sc_cmd->cmd = cmd->cmd; sc->sc_cmd->len = cmd->len; memcpy(sc->sc_cmd->data, cmd->data, sizeof(cmd->data)); bus_dmamap_sync(sc->sc_dmatag, sc->sc_cmd_dmamap, BUS_DMASYNC_PREWRITE); bus_space_write_4(sc->sc_bt, sc->sc_mailbox, 0, sc->sc_cmd_phys); /* Flush the cacheline it is in -- SMU bypasses the cache */ __asm __volatile("sync; dcbf 0,%0; sync" :: "r"(sc->sc_cmd): "memory"); /* Ring SMU doorbell */ macgpio_write(smu_doorbell, GPIO_DDR_OUTPUT); } static void smu_doorbell_intr(void *xdev) { device_t smu; struct smu_softc *sc; int doorbell_ack; smu = xdev; doorbell_ack = macgpio_read(smu_doorbell); sc = device_get_softc(smu); if (doorbell_ack != (GPIO_DDR_OUTPUT | GPIO_LEVEL_RO | GPIO_DATA)) return; mtx_lock(&sc->sc_mtx); if (sc->sc_cur_cmd == NULL) /* spurious */ goto done; /* Check result. First invalidate the cache again... */ __asm __volatile("dcbf 0,%0; sync" :: "r"(sc->sc_cmd) : "memory"); bus_dmamap_sync(sc->sc_dmatag, sc->sc_cmd_dmamap, BUS_DMASYNC_POSTREAD); sc->sc_cur_cmd->cmd = sc->sc_cmd->cmd; sc->sc_cur_cmd->len = sc->sc_cmd->len; memcpy(sc->sc_cur_cmd->data, sc->sc_cmd->data, sizeof(sc->sc_cmd->data)); wakeup(sc->sc_cur_cmd); sc->sc_cur_cmd = NULL; if (sc->sc_u3) powerpc_pow_enabled = 1; done: /* Queue next command if one is pending */ if (STAILQ_FIRST(&sc->sc_cmdq) != NULL) { sc->sc_cur_cmd = STAILQ_FIRST(&sc->sc_cmdq); STAILQ_REMOVE_HEAD(&sc->sc_cmdq, cmd_q); smu_send_cmd(smu, sc->sc_cur_cmd); } mtx_unlock(&sc->sc_mtx); } static int smu_run_cmd(device_t dev, struct smu_cmd *cmd, int wait) { struct smu_softc *sc; uint8_t cmd_code; int error; sc = device_get_softc(dev); cmd_code = cmd->cmd; mtx_lock(&sc->sc_mtx); if (sc->sc_cur_cmd != NULL) { STAILQ_INSERT_TAIL(&sc->sc_cmdq, cmd, cmd_q); } else smu_send_cmd(dev, cmd); mtx_unlock(&sc->sc_mtx); if (!wait) return (0); if (sc->sc_doorbellirqid < 0) { /* Poll if the IRQ has not been set up yet */ do { DELAY(50); smu_doorbell_intr(dev); } while (sc->sc_cur_cmd != NULL); } else { /* smu_doorbell_intr will wake us when the command is ACK'ed */ error = tsleep(cmd, 0, "smu", 800 * hz / 1000); if (error != 0) smu_doorbell_intr(dev); /* One last chance */ if (error != 0) { mtx_lock(&sc->sc_mtx); if (cmd->cmd == cmd_code) { /* Never processed */ /* Abort this command if we timed out */ if (sc->sc_cur_cmd == cmd) sc->sc_cur_cmd = NULL; else STAILQ_REMOVE(&sc->sc_cmdq, cmd, smu_cmd, cmd_q); mtx_unlock(&sc->sc_mtx); return (error); } error = 0; mtx_unlock(&sc->sc_mtx); } } /* SMU acks the command by inverting the command bits */ if (cmd->cmd == ((~cmd_code) & 0xff)) error = 0; else error = EIO; return (error); } static int smu_get_datablock(device_t dev, int8_t id, uint8_t *buf, size_t len) { struct smu_cmd cmd; uint8_t addr[4]; cmd.cmd = SMU_PARTITION; cmd.len = 2; cmd.data[0] = SMU_PARTITION_LATEST; cmd.data[1] = id; smu_run_cmd(dev, &cmd, 1); addr[0] = addr[1] = 0; addr[2] = cmd.data[0]; addr[3] = cmd.data[1]; cmd.cmd = SMU_MISC; cmd.len = 7; cmd.data[0] = 
SMU_MISC_GET_DATA; cmd.data[1] = sizeof(addr); memcpy(&cmd.data[2], addr, sizeof(addr)); cmd.data[6] = len; smu_run_cmd(dev, &cmd, 1); memcpy(buf, cmd.data, len); return (0); } static void smu_slew_cpu_voltage(device_t dev, int to) { struct smu_cmd cmd; cmd.cmd = SMU_POWER; cmd.len = 8; cmd.data[0] = 'V'; cmd.data[1] = 'S'; cmd.data[2] = 'L'; cmd.data[3] = 'E'; cmd.data[4] = 'W'; cmd.data[5] = 0xff; cmd.data[6] = 1; cmd.data[7] = to; smu_run_cmd(dev, &cmd, 1); } static void smu_cpufreq_pre_change(device_t dev, const struct cf_level *level) { /* * Make sure the CPU voltage is raised before we raise * the clock. */ if (level->rel_set[0].freq == 10000 /* max */) smu_slew_cpu_voltage(dev, 0); } static void smu_cpufreq_post_change(device_t dev, const struct cf_level *level) { /* We are safe to reduce CPU voltage after a downward transition */ if (level->rel_set[0].freq < 10000 /* max */) smu_slew_cpu_voltage(dev, 1); /* XXX: 1/4 voltage for 970MP? */ } /* Routines for probing the SMU doorbell GPIO */ static int doorbell_probe(device_t dev); static int doorbell_attach(device_t dev); static device_method_t doorbell_methods[] = { /* Device interface */ DEVMETHOD(device_probe, doorbell_probe), DEVMETHOD(device_attach, doorbell_attach), { 0, 0 }, }; static driver_t doorbell_driver = { "smudoorbell", doorbell_methods, 0 }; static devclass_t doorbell_devclass; EARLY_DRIVER_MODULE(smudoorbell, macgpio, doorbell_driver, doorbell_devclass, 0, 0, BUS_PASS_SUPPORTDEV); static int doorbell_probe(device_t dev) { const char *name = ofw_bus_get_name(dev); if (strcmp(name, "smu-doorbell") != 0) return (ENXIO); device_set_desc(dev, "SMU Doorbell GPIO"); device_quiet(dev); return (0); } static int doorbell_attach(device_t dev) { smu_doorbell = dev; return (0); } /* * Sensor and fan management */ static int smu_fan_check_old_style(struct smu_fan *fan) { device_t smu = fan->dev; struct smu_softc *sc = device_get_softc(smu); struct smu_cmd cmd; int error; if (sc->old_style_fans != -1) return (sc->old_style_fans); /* * Apple has two fan control mechanisms. We can't distinguish * them except by seeing if the new one fails. If the new one * fails, use the old one. */ cmd.cmd = SMU_FAN; cmd.len = 2; cmd.data[0] = 0x31; cmd.data[1] = fan->reg; do { error = smu_run_cmd(smu, &cmd, 1); } while (error == EWOULDBLOCK); sc->old_style_fans = (error != 0); return (sc->old_style_fans); } static int smu_fan_set_rpm(struct smu_fan *fan, int rpm) { device_t smu = fan->dev; struct smu_cmd cmd; int error; cmd.cmd = SMU_FAN; error = EIO; /* Clamp to allowed range */ rpm = max(fan->fan.min_rpm, rpm); rpm = min(fan->fan.max_rpm, rpm); smu_fan_check_old_style(fan); if (!fan->old_style) { cmd.len = 4; cmd.data[0] = 0x30; cmd.data[1] = fan->reg; cmd.data[2] = (rpm >> 8) & 0xff; cmd.data[3] = rpm & 0xff; error = smu_run_cmd(smu, &cmd, 1); if (error && error != EWOULDBLOCK) fan->old_style = 1; } else { cmd.len = 14; cmd.data[0] = 0x00; /* RPM fan. 
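 * (Old-style layout: data[0] selects the fan type, 0x00 for RPM here
 * versus 0x10 for PWM in smu_fan_set_pwm(), and data[1] is a bitmask of
 * the fans to update.)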
*/ cmd.data[1] = 1 << fan->reg; cmd.data[2 + 2*fan->reg] = (rpm >> 8) & 0xff; cmd.data[3 + 2*fan->reg] = rpm & 0xff; error = smu_run_cmd(smu, &cmd, 1); } if (error == 0) fan->setpoint = rpm; return (error); } static int smu_fan_read_rpm(struct smu_fan *fan) { device_t smu = fan->dev; struct smu_cmd cmd; int rpm, error; smu_fan_check_old_style(fan); if (!fan->old_style) { cmd.cmd = SMU_FAN; cmd.len = 2; cmd.data[0] = 0x31; cmd.data[1] = fan->reg; error = smu_run_cmd(smu, &cmd, 1); if (error && error != EWOULDBLOCK) fan->old_style = 1; rpm = (cmd.data[0] << 8) | cmd.data[1]; } if (fan->old_style) { cmd.cmd = SMU_FAN; cmd.len = 1; cmd.data[0] = SMU_RPM_STATUS; error = smu_run_cmd(smu, &cmd, 1); if (error) return (error); rpm = (cmd.data[fan->reg*2+1] << 8) | cmd.data[fan->reg*2+2]; } return (rpm); } static int smu_fan_set_pwm(struct smu_fan *fan, int pwm) { device_t smu = fan->dev; struct smu_cmd cmd; int error; cmd.cmd = SMU_FAN; error = EIO; /* Clamp to allowed range */ pwm = max(fan->fan.min_rpm, pwm); pwm = min(fan->fan.max_rpm, pwm); /* * Apple has two fan control mechanisms. We can't distinguish * them except by seeing if the new one fails. If the new one * fails, use the old one. */ if (!fan->old_style) { cmd.len = 4; cmd.data[0] = 0x30; cmd.data[1] = fan->reg; cmd.data[2] = (pwm >> 8) & 0xff; cmd.data[3] = pwm & 0xff; error = smu_run_cmd(smu, &cmd, 1); if (error && error != EWOULDBLOCK) fan->old_style = 1; } if (fan->old_style) { cmd.len = 14; cmd.data[0] = 0x10; /* PWM fan. */ cmd.data[1] = 1 << fan->reg; cmd.data[2 + 2*fan->reg] = (pwm >> 8) & 0xff; cmd.data[3 + 2*fan->reg] = pwm & 0xff; error = smu_run_cmd(smu, &cmd, 1); } if (error == 0) fan->setpoint = pwm; return (error); } static int smu_fan_read_pwm(struct smu_fan *fan, int *pwm, int *rpm) { device_t smu = fan->dev; struct smu_cmd cmd; int error; if (!fan->old_style) { cmd.cmd = SMU_FAN; cmd.len = 2; cmd.data[0] = 0x31; cmd.data[1] = fan->reg; error = smu_run_cmd(smu, &cmd, 1); if (error && error != EWOULDBLOCK) fan->old_style = 1; *rpm = (cmd.data[0] << 8) | cmd.data[1]; } if (fan->old_style) { cmd.cmd = SMU_FAN; cmd.len = 1; cmd.data[0] = SMU_PWM_STATUS; error = smu_run_cmd(smu, &cmd, 1); if (error) return (error); *rpm = (cmd.data[fan->reg*2+1] << 8) | cmd.data[fan->reg*2+2]; } if (fan->old_style) { cmd.cmd = SMU_FAN; cmd.len = 14; cmd.data[0] = SMU_PWM_SETPOINT; cmd.data[1] = 1 << fan->reg; error = smu_run_cmd(smu, &cmd, 1); if (error) return (error); *pwm = cmd.data[fan->reg*2+2]; } return (0); } static int smu_fanrpm_sysctl(SYSCTL_HANDLER_ARGS) { device_t smu; struct smu_softc *sc; struct smu_fan *fan; int pwm = 0, rpm, error = 0; smu = arg1; sc = device_get_softc(smu); fan = &sc->sc_fans[arg2 & 0xff]; if (fan->type == SMU_FAN_RPM) { rpm = smu_fan_read_rpm(fan); if (rpm < 0) return (rpm); error = sysctl_handle_int(oidp, &rpm, 0, req); } else { error = smu_fan_read_pwm(fan, &pwm, &rpm); if (error < 0) return (EIO); switch (arg2 & 0xff00) { case SMU_PWM_SYSCTL_PWM: error = sysctl_handle_int(oidp, &pwm, 0, req); break; case SMU_PWM_SYSCTL_RPM: error = sysctl_handle_int(oidp, &rpm, 0, req); break; default: /* This should never happen */ return (EINVAL); } } /* We can only read the RPM from a PWM controlled fan, so return. 
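 * As an aside on the encoding: arg2 packs two values, the low byte is
 * the index into sc->sc_fans and the high byte selects the view, so a
 * reader can decode it the same way this handler does (the name "mode"
 * is illustrative only):
 *
 *	fan  = &sc->sc_fans[arg2 & 0xff];	(which fan)
 *	mode = arg2 & 0xff00;			(SMU_PWM_SYSCTL_PWM/_RPM)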
*/ if ((arg2 & 0xff00) == SMU_PWM_SYSCTL_RPM) return (0); if (error || !req->newptr) return (error); sc->sc_lastuserchange = time_uptime; if (fan->type == SMU_FAN_RPM) return (smu_fan_set_rpm(fan, rpm)); else return (smu_fan_set_pwm(fan, pwm)); } static void smu_fill_fan_prop(device_t dev, phandle_t child, int id) { struct smu_fan *fan; struct smu_softc *sc; char type[32]; sc = device_get_softc(dev); fan = &sc->sc_fans[id]; OF_getprop(child, "device_type", type, sizeof(type)); /* We have either RPM or PWM controlled fans. */ if (strcmp(type, "fan-rpm-control") == 0) fan->type = SMU_FAN_RPM; else fan->type = SMU_FAN_PWM; fan->dev = dev; fan->old_style = 0; OF_getprop(child, "reg", &fan->reg, sizeof(cell_t)); OF_getprop(child, "min-value", &fan->fan.min_rpm, sizeof(int)); OF_getprop(child, "max-value", &fan->fan.max_rpm, sizeof(int)); OF_getprop(child, "zone", &fan->fan.zone, sizeof(int)); if (OF_getprop(child, "unmanaged-value", &fan->fan.default_rpm, sizeof(int)) != sizeof(int)) fan->fan.default_rpm = fan->fan.max_rpm; OF_getprop(child, "location", fan->fan.name, sizeof(fan->fan.name)); if (fan->type == SMU_FAN_RPM) fan->setpoint = smu_fan_read_rpm(fan); else smu_fan_read_pwm(fan, &fan->setpoint, &fan->rpm); } /* On the first call count the number of fans. In the second call, * after allocating the fan struct, fill the properties of the fans. */ static int smu_count_fans(device_t dev) { struct smu_softc *sc; phandle_t child, node, root; int nfans = 0; node = ofw_bus_get_node(dev); sc = device_get_softc(dev); /* First find the fanroots and count the number of fans. */ for (root = OF_child(node); root != 0; root = OF_peer(root)) { char name[32]; memset(name, 0, sizeof(name)); OF_getprop(root, "name", name, sizeof(name)); if (strncmp(name, "rpm-fans", 9) == 0 || strncmp(name, "pwm-fans", 9) == 0 || strncmp(name, "fans", 5) == 0) for (child = OF_child(root); child != 0; child = OF_peer(child)) { nfans++; /* When allocated, fill the fan properties. */ if (sc->sc_fans != NULL) { smu_fill_fan_prop(dev, child, nfans - 1); } } } if (nfans == 0) { device_printf(dev, "WARNING: No fans detected!\n"); return (0); } return (nfans); } static void smu_attach_fans(device_t dev, phandle_t fanroot) { struct smu_fan *fan; struct smu_softc *sc; struct sysctl_oid *oid, *fanroot_oid; struct sysctl_ctx_list *ctx; char sysctl_name[32]; int i, j; sc = device_get_softc(dev); /* Get the number of fans. */ sc->sc_nfans = smu_count_fans(dev); if (sc->sc_nfans == 0) return; /* Now we're able to allocate memory for the fans struct. */ sc->sc_fans = malloc(sc->sc_nfans * sizeof(struct smu_fan), M_SMU, M_WAITOK | M_ZERO); /* Now fill in the properties. 
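 * To restate the two-pass scheme: the first smu_count_fans() call ran
 * with sc->sc_fans still NULL and only counted the children of the
 * "fans"/"rpm-fans"/"pwm-fans" nodes; this second call revisits the
 * same children and, because the array now exists, fills each
 * struct smu_fan via smu_fill_fan_prop().  In outline:
 *
 *	n = smu_count_fans(dev);		(pass 1: count only)
 *	sc->sc_fans = malloc(n * sizeof(*sc->sc_fans), M_SMU,
 *	    M_WAITOK | M_ZERO);
 *	(void)smu_count_fans(dev);		(pass 2: fill properties)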
*/ smu_count_fans(dev); /* Register fans with pmac_thermal */ for (i = 0; i < sc->sc_nfans; i++) pmac_thermal_fan_register(&sc->sc_fans[i].fan); ctx = device_get_sysctl_ctx(dev); fanroot_oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fans", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "SMU Fan Information"); /* Add sysctls */ for (i = 0; i < sc->sc_nfans; i++) { fan = &sc->sc_fans[i]; for (j = 0; j < strlen(fan->fan.name); j++) { sysctl_name[j] = tolower(fan->fan.name[j]); if (isspace(sysctl_name[j])) sysctl_name[j] = '_'; } sysctl_name[j] = 0; if (fan->type == SMU_FAN_RPM) { oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(fanroot_oid), OID_AUTO, sysctl_name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Fan Information"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "minrpm", CTLFLAG_RD, &fan->fan.min_rpm, 0, "Minimum allowed RPM"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "maxrpm", CTLFLAG_RD, &fan->fan.max_rpm, 0, "Maximum allowed RPM"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "rpm",CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, i, smu_fanrpm_sysctl, "I", "Fan RPM"); fan->fan.read = (int (*)(struct pmac_fan *))smu_fan_read_rpm; fan->fan.set = (int (*)(struct pmac_fan *, int))smu_fan_set_rpm; } else { oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(fanroot_oid), OID_AUTO, sysctl_name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Fan Information"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "minpwm", CTLFLAG_RD, &fan->fan.min_rpm, 0, "Minimum allowed PWM in %"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "maxpwm", CTLFLAG_RD, &fan->fan.max_rpm, 0, "Maximum allowed PWM in %"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "pwm",CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, SMU_PWM_SYSCTL_PWM | i, smu_fanrpm_sysctl, "I", "Fan PWM in %"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "rpm",CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, SMU_PWM_SYSCTL_RPM | i, smu_fanrpm_sysctl, "I", "Fan RPM"); fan->fan.read = NULL; fan->fan.set = (int (*)(struct pmac_fan *, int))smu_fan_set_pwm; } if (bootverbose) device_printf(dev, "Fan: %s type: %d\n", fan->fan.name, fan->type); } } static int smu_sensor_read(struct smu_sensor *sens) { device_t smu = sens->dev; struct smu_cmd cmd; struct smu_softc *sc; int64_t value; int error; cmd.cmd = SMU_ADC; cmd.len = 1; cmd.data[0] = sens->reg; error = 0; error = smu_run_cmd(smu, &cmd, 1); if (error != 0) return (-1); sc = device_get_softc(smu); value = (cmd.data[0] << 8) | cmd.data[1]; switch (sens->type) { case SMU_TEMP_SENSOR: value *= sc->sc_cpu_diode_scale; value >>= 3; value += ((int64_t)sc->sc_cpu_diode_offset) << 9; value <<= 1; /* Convert from 16.16 fixed point degC into integer 0.1 K. */ value = 10*(value >> 16) + ((10*(value & 0xffff)) >> 16) + 2731; break; case SMU_VOLTAGE_SENSOR: value *= sc->sc_cpu_volt_scale; value += sc->sc_cpu_volt_offset; value <<= 4; /* Convert from 16.16 fixed point V into mV. */ value *= 15625; value /= 1024; value /= 1000; break; case SMU_CURRENT_SENSOR: value *= sc->sc_cpu_curr_scale; value += sc->sc_cpu_curr_offset; value <<= 4; /* Convert from 16.16 fixed point A into mA. */ value *= 15625; value /= 1024; value /= 1000; break; case SMU_POWER_SENSOR: value *= sc->sc_slots_pow_scale; value += sc->sc_slots_pow_offset; value <<= 4; /* Convert from 16.16 fixed point W into mW. 
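 * The odd-looking constant below is just 1000/2^16 in integer form:
 * converting 16.16 fixed point to milli-units multiplies by 1000 and
 * divides by 65536, and 1000/65536 reduces to 15625/1024000, so
 *
 *	value = value * 15625 / 1024 / 1000;
 *
 * is, up to integer rounding, value * 1000 >> 16, with the multiply
 * done first to keep precision in the int64_t intermediate.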
*/ value *= 15625; value /= 1024; value /= 1000; break; } return (value); } static int smu_sensor_sysctl(SYSCTL_HANDLER_ARGS) { device_t smu; struct smu_softc *sc; struct smu_sensor *sens; int value, error; smu = arg1; sc = device_get_softc(smu); sens = &sc->sc_sensors[arg2]; value = smu_sensor_read(sens); if (value < 0) return (EBUSY); error = sysctl_handle_int(oidp, &value, 0, req); return (error); } static void smu_attach_sensors(device_t dev, phandle_t sensroot) { struct smu_sensor *sens; struct smu_softc *sc; struct sysctl_oid *sensroot_oid; struct sysctl_ctx_list *ctx; phandle_t child; char type[32]; int i; sc = device_get_softc(dev); sc->sc_nsensors = 0; for (child = OF_child(sensroot); child != 0; child = OF_peer(child)) sc->sc_nsensors++; if (sc->sc_nsensors == 0) { device_printf(dev, "WARNING: No sensors detected!\n"); return; } sc->sc_sensors = malloc(sc->sc_nsensors * sizeof(struct smu_sensor), M_SMU, M_WAITOK | M_ZERO); sens = sc->sc_sensors; sc->sc_nsensors = 0; ctx = device_get_sysctl_ctx(dev); sensroot_oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "sensors", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "SMU Sensor Information"); for (child = OF_child(sensroot); child != 0; child = OF_peer(child)) { char sysctl_name[40], sysctl_desc[40]; const char *units; sens->dev = dev; OF_getprop(child, "device_type", type, sizeof(type)); if (strcmp(type, "current-sensor") == 0) { sens->type = SMU_CURRENT_SENSOR; units = "mA"; } else if (strcmp(type, "temp-sensor") == 0) { sens->type = SMU_TEMP_SENSOR; units = "C"; } else if (strcmp(type, "voltage-sensor") == 0) { sens->type = SMU_VOLTAGE_SENSOR; units = "mV"; } else if (strcmp(type, "power-sensor") == 0) { sens->type = SMU_POWER_SENSOR; units = "mW"; } else { continue; } OF_getprop(child, "reg", &sens->reg, sizeof(cell_t)); OF_getprop(child, "zone", &sens->therm.zone, sizeof(int)); OF_getprop(child, "location", sens->therm.name, sizeof(sens->therm.name)); for (i = 0; i < strlen(sens->therm.name); i++) { sysctl_name[i] = tolower(sens->therm.name[i]); if (isspace(sysctl_name[i])) sysctl_name[i] = '_'; } sysctl_name[i] = 0; sprintf(sysctl_desc,"%s (%s)", sens->therm.name, units); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(sensroot_oid), OID_AUTO, sysctl_name, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, dev, sc->sc_nsensors, smu_sensor_sysctl, (sens->type == SMU_TEMP_SENSOR) ? "IK" : "I", sysctl_desc); if (sens->type == SMU_TEMP_SENSOR) { /* Make up some numbers */ sens->therm.target_temp = 500 + 2731; /* 50 C */ sens->therm.max_temp = 900 + 2731; /* 90 C */ sens->therm.read = (int (*)(struct pmac_therm *))smu_sensor_read; pmac_thermal_sensor_register(&sens->therm); } sens++; sc->sc_nsensors++; } } static void smu_set_sleepled(void *xdev, int onoff) { static struct smu_cmd cmd; device_t smu = xdev; cmd.cmd = SMU_MISC; cmd.len = 3; cmd.data[0] = SMU_MISC_LED_CTRL; cmd.data[1] = 0; cmd.data[2] = onoff; smu_run_cmd(smu, &cmd, 0); } static int smu_server_mode(SYSCTL_HANDLER_ARGS) { struct smu_cmd cmd; u_int server_mode; device_t smu = arg1; int error; cmd.cmd = SMU_POWER_EVENTS; cmd.len = 1; cmd.data[0] = SMU_PWR_GET_POWERUP; error = smu_run_cmd(smu, &cmd, 1); if (error) return (error); server_mode = (cmd.data[1] & SMU_WAKEUP_AC_INSERT) ? 
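/*
 * "Server mode" is Apple's option to power the machine back up when AC
 * power returns.  Reading queries the SMU power-up event mask and
 * tests SMU_WAKEUP_AC_INSERT; writing issues SMU_PWR_SET_POWERUP or
 * SMU_PWR_CLR_POWERUP for the same bit, as the code below does.  The
 * handler is presumably attached as a "server_mode" sysctl elsewhere
 * in the driver (the SYSCTL_ADD_PROC is outside this hunk, so that
 * name is an assumption).
 */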
1 : 0; error = sysctl_handle_int(oidp, &server_mode, 0, req); if (error || !req->newptr) return (error); if (server_mode == 1) cmd.data[0] = SMU_PWR_SET_POWERUP; else if (server_mode == 0) cmd.data[0] = SMU_PWR_CLR_POWERUP; else return (EINVAL); cmd.len = 3; cmd.data[1] = 0; cmd.data[2] = SMU_WAKEUP_AC_INSERT; return (smu_run_cmd(smu, &cmd, 1)); } static void smu_shutdown(void *xdev, int howto) { device_t smu = xdev; struct smu_cmd cmd; cmd.cmd = SMU_POWER; if (howto & RB_HALT) strcpy(cmd.data, "SHUTDOWN"); else strcpy(cmd.data, "RESTART"); cmd.len = strlen(cmd.data); smu_run_cmd(smu, &cmd, 1); for (;;); } static int smu_gettime(device_t dev, struct timespec *ts) { struct smu_cmd cmd; struct clocktime ct; cmd.cmd = SMU_RTC; cmd.len = 1; cmd.data[0] = SMU_RTC_GET; if (smu_run_cmd(dev, &cmd, 1) != 0) return (ENXIO); ct.nsec = 0; ct.sec = bcd2bin(cmd.data[0]); ct.min = bcd2bin(cmd.data[1]); ct.hour = bcd2bin(cmd.data[2]); ct.dow = bcd2bin(cmd.data[3]); ct.day = bcd2bin(cmd.data[4]); ct.mon = bcd2bin(cmd.data[5]); ct.year = bcd2bin(cmd.data[6]) + 2000; return (clock_ct_to_ts(&ct, ts)); } static int smu_settime(device_t dev, struct timespec *ts) { static struct smu_cmd cmd; struct clocktime ct; cmd.cmd = SMU_RTC; cmd.len = 8; cmd.data[0] = SMU_RTC_SET; clock_ts_to_ct(ts, &ct); cmd.data[1] = bin2bcd(ct.sec); cmd.data[2] = bin2bcd(ct.min); cmd.data[3] = bin2bcd(ct.hour); cmd.data[4] = bin2bcd(ct.dow); cmd.data[5] = bin2bcd(ct.day); cmd.data[6] = bin2bcd(ct.mon); cmd.data[7] = bin2bcd(ct.year - 2000); return (smu_run_cmd(dev, &cmd, 0)); } /* SMU I2C Interface */ static int smuiic_probe(device_t dev); static int smuiic_attach(device_t dev); static int smuiic_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs); static phandle_t smuiic_get_node(device_t bus, device_t dev); static device_method_t smuiic_methods[] = { /* device interface */ DEVMETHOD(device_probe, smuiic_probe), DEVMETHOD(device_attach, smuiic_attach), /* iicbus interface */ DEVMETHOD(iicbus_callback, iicbus_null_callback), DEVMETHOD(iicbus_transfer, smuiic_transfer), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, smuiic_get_node), { 0, 0 } }; struct smuiic_softc { struct mtx sc_mtx; volatile int sc_iic_inuse; int sc_busno; }; static driver_t smuiic_driver = { "iichb", smuiic_methods, sizeof(struct smuiic_softc) }; static devclass_t smuiic_devclass; DRIVER_MODULE(smuiic, smu, smuiic_driver, smuiic_devclass, 0, 0); static void smu_attach_i2c(device_t smu, phandle_t i2croot) { phandle_t child; device_t cdev; struct ofw_bus_devinfo *dinfo; char name[32]; for (child = OF_child(i2croot); child != 0; child = OF_peer(child)) { if (OF_getprop(child, "name", name, sizeof(name)) <= 0) continue; if (strcmp(name, "i2c-bus") != 0 && strcmp(name, "i2c") != 0) continue; dinfo = malloc(sizeof(struct ofw_bus_devinfo), M_SMU, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(dinfo, child) != 0) { free(dinfo, M_SMU); continue; } cdev = device_add_child(smu, NULL, -1); if (cdev == NULL) { device_printf(smu, "<%s>: device_add_child failed\n", dinfo->obd_name); ofw_bus_gen_destroy_devinfo(dinfo); free(dinfo, M_SMU); continue; } device_set_ivars(cdev, dinfo); } } static int smuiic_probe(device_t dev) { const char *name; name = ofw_bus_get_name(dev); if (name == NULL) return (ENXIO); if (strcmp(name, "i2c-bus") == 0 || strcmp(name, "i2c") == 0) { device_set_desc(dev, "SMU I2C controller"); return (0); } return (ENXIO); } static int smuiic_attach(device_t dev) { struct smuiic_softc *sc = device_get_softc(dev); mtx_init(&sc->sc_mtx, "smuiic", 
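/*
 * Note that this mutex does not guard SMU mailbox state (smu.c has its
 * own lock); it only serializes smuiic_transfer() callers through the
 * sc_iic_inuse flag, and is dropped around each smu_run_cmd() call
 * because that path can sleep.  sc_iic_inuse keeps the bus claimed
 * across the unlock, since the SMU runs one I2C transaction at a time.
 */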
NULL, MTX_DEF); sc->sc_iic_inuse = 0; /* Get our bus number */ OF_getprop(ofw_bus_get_node(dev), "reg", &sc->sc_busno, sizeof(sc->sc_busno)); /* Add the IIC bus layer */ device_add_child(dev, "iicbus", -1); return (bus_generic_attach(dev)); } static int smuiic_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs) { struct smuiic_softc *sc = device_get_softc(dev); struct smu_cmd cmd; int i, j, error; mtx_lock(&sc->sc_mtx); while (sc->sc_iic_inuse) mtx_sleep(sc, &sc->sc_mtx, 0, "smuiic", 100); sc->sc_iic_inuse = 1; error = 0; for (i = 0; i < nmsgs; i++) { cmd.cmd = SMU_I2C; cmd.data[0] = sc->sc_busno; if (msgs[i].flags & IIC_M_NOSTOP) cmd.data[1] = SMU_I2C_COMBINED; else cmd.data[1] = SMU_I2C_SIMPLE; cmd.data[2] = msgs[i].slave; if (msgs[i].flags & IIC_M_RD) cmd.data[2] |= 1; if (msgs[i].flags & IIC_M_NOSTOP) { KASSERT(msgs[i].len < 4, ("oversize I2C combined message")); cmd.data[3] = min(msgs[i].len, 3); memcpy(&cmd.data[4], msgs[i].buf, min(msgs[i].len, 3)); i++; /* Advance to next part of message */ } else { cmd.data[3] = 0; memset(&cmd.data[4], 0, 3); } cmd.data[7] = msgs[i].slave; if (msgs[i].flags & IIC_M_RD) cmd.data[7] |= 1; cmd.data[8] = msgs[i].len; if (msgs[i].flags & IIC_M_RD) { memset(&cmd.data[9], 0xff, msgs[i].len); cmd.len = 9; } else { memcpy(&cmd.data[9], msgs[i].buf, msgs[i].len); cmd.len = 9 + msgs[i].len; } mtx_unlock(&sc->sc_mtx); smu_run_cmd(device_get_parent(dev), &cmd, 1); mtx_lock(&sc->sc_mtx); for (j = 0; j < 10; j++) { cmd.cmd = SMU_I2C; cmd.len = 1; cmd.data[0] = 0; memset(&cmd.data[1], 0xff, msgs[i].len); mtx_unlock(&sc->sc_mtx); smu_run_cmd(device_get_parent(dev), &cmd, 1); mtx_lock(&sc->sc_mtx); if (!(cmd.data[0] & 0x80)) break; mtx_sleep(sc, &sc->sc_mtx, 0, "smuiic", 10); } if (cmd.data[0] & 0x80) { error = EIO; msgs[i].len = 0; goto exit; } memcpy(msgs[i].buf, &cmd.data[1], msgs[i].len); msgs[i].len = cmd.len - 1; } exit: sc->sc_iic_inuse = 0; mtx_unlock(&sc->sc_mtx); wakeup(sc); return (error); } static phandle_t smuiic_get_node(device_t bus, device_t dev) { return (ofw_bus_get_node(bus)); } diff --git a/sys/powerpc/powermac/uninorth.c b/sys/powerpc/powermac/uninorth.c index f1f6ffafad63..64371162a05f 100644 --- a/sys/powerpc/powermac/uninorth.c +++ b/sys/powerpc/powermac/uninorth.c @@ -1,672 +1,672 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2002 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Driver for the Uninorth chip itself. */ static MALLOC_DEFINE(M_UNIN, "unin", "unin device information"); /* * Device interface. */ static int unin_chip_probe(device_t); static int unin_chip_attach(device_t); /* * Bus interface. */ static int unin_chip_print_child(device_t dev, device_t child); static void unin_chip_probe_nomatch(device_t, device_t); static struct resource *unin_chip_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int unin_chip_activate_resource(device_t, device_t, int, int, struct resource *); static int unin_chip_deactivate_resource(device_t, device_t, int, int, struct resource *); static int unin_chip_release_resource(device_t, device_t, int, int, struct resource *); static struct resource_list *unin_chip_get_resource_list (device_t, device_t); /* * OFW Bus interface */ static ofw_bus_get_devinfo_t unin_chip_get_devinfo; /* * Local routines */ static void unin_enable_gmac(device_t dev); static void unin_enable_mpic(device_t dev); /* * Driver methods. */ static device_method_t unin_chip_methods[] = { /* Device interface */ DEVMETHOD(device_probe, unin_chip_probe), DEVMETHOD(device_attach, unin_chip_attach), /* Bus interface */ DEVMETHOD(bus_print_child, unin_chip_print_child), DEVMETHOD(bus_probe_nomatch, unin_chip_probe_nomatch), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, unin_chip_alloc_resource), DEVMETHOD(bus_release_resource, unin_chip_release_resource), DEVMETHOD(bus_activate_resource, unin_chip_activate_resource), DEVMETHOD(bus_deactivate_resource, unin_chip_deactivate_resource), DEVMETHOD(bus_get_resource_list, unin_chip_get_resource_list), - DEVMETHOD(bus_child_pnpinfo_str, ofw_bus_gen_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, unin_chip_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t unin_chip_driver = { "unin", unin_chip_methods, sizeof(struct unin_chip_softc) }; static devclass_t unin_chip_devclass; /* * Assume there is only one unin chip in a PowerMac, so that pmu.c functions can * suspend the chip after the whole rest of the device tree is suspended, not * earlier. 
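 * (unin_chip_sleep() and unin_chip_wake() at the bottom of this file
 * fall back to this cached device_t when called with dev == NULL.)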
*/ static device_t unin_chip; EARLY_DRIVER_MODULE(unin, ofwbus, unin_chip_driver, unin_chip_devclass, 0, 0, BUS_PASS_BUS); /* * Add an interrupt to the dev's resource list if present */ static void unin_chip_add_intr(phandle_t devnode, struct unin_chip_devinfo *dinfo) { phandle_t iparent; int *intr; int i, nintr; int icells; if (dinfo->udi_ninterrupts >= 6) { printf("unin: device has more than 6 interrupts\n"); return; } nintr = OF_getprop_alloc_multi(devnode, "interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) { nintr = OF_getprop_alloc_multi(devnode, "AAPL,interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) return; } if (intr[0] == -1) return; if (OF_getprop(devnode, "interrupt-parent", &iparent, sizeof(iparent)) <= 0) panic("Interrupt but no interrupt parent!\n"); if (OF_searchprop(iparent, "#interrupt-cells", &icells, sizeof(icells)) <= 0) icells = 1; for (i = 0; i < nintr; i+=icells) { u_int irq = MAP_IRQ(iparent, intr[i]); resource_list_add(&dinfo->udi_resources, SYS_RES_IRQ, dinfo->udi_ninterrupts, irq, irq, 1); if (icells > 1) { powerpc_config_intr(irq, (intr[i+1] & 1) ? INTR_TRIGGER_LEVEL : INTR_TRIGGER_EDGE, INTR_POLARITY_LOW); } dinfo->udi_interrupts[dinfo->udi_ninterrupts] = irq; dinfo->udi_ninterrupts++; } } static void unin_chip_add_reg(phandle_t devnode, struct unin_chip_devinfo *dinfo) { struct unin_chip_reg *reg; int i, nreg; nreg = OF_getprop_alloc_multi(devnode, "reg", sizeof(*reg), (void **)®); if (nreg == -1) return; for (i = 0; i < nreg; i++) { resource_list_add(&dinfo->udi_resources, SYS_RES_MEMORY, i, reg[i].mr_base, reg[i].mr_base + reg[i].mr_size, reg[i].mr_size); } } static void unin_update_reg(device_t dev, uint32_t regoff, uint32_t set, uint32_t clr) { volatile u_int *reg; struct unin_chip_softc *sc; u_int32_t tmpl; sc = device_get_softc(dev); reg = (void *)(sc->sc_addr + regoff); tmpl = inl(reg); tmpl &= ~clr; tmpl |= set; outl(reg, tmpl); } static void unin_enable_gmac(device_t dev) { unin_update_reg(dev, UNIN_CLOCKCNTL, UNIN_CLOCKCNTL_GMAC, 0); } static void unin_enable_mpic(device_t dev) { unin_update_reg(dev, UNIN_TOGGLE_REG, UNIN_MPIC_RESET | UNIN_MPIC_OUTPUT_ENABLE, 0); } static int unin_chip_probe(device_t dev) { const char *name; name = ofw_bus_get_name(dev); if (name == NULL) return (ENXIO); if (strcmp(name, "uni-n") != 0 && strcmp(name, "u3") != 0 && strcmp(name, "u4") != 0) return (ENXIO); device_set_desc(dev, "Apple UniNorth System Controller"); return (0); } static int unin_chip_attach(device_t dev) { struct unin_chip_softc *sc; struct unin_chip_devinfo *dinfo; phandle_t root; phandle_t child; phandle_t iparent; device_t cdev; cell_t acells, scells; char compat[32]; char name[32]; u_int irq, reg[3]; int error, i = 0; sc = device_get_softc(dev); root = ofw_bus_get_node(dev); if (OF_getprop(root, "reg", reg, sizeof(reg)) < 8) return (ENXIO); acells = scells = 1; OF_getprop(OF_parent(root), "#address-cells", &acells, sizeof(acells)); OF_getprop(OF_parent(root), "#size-cells", &scells, sizeof(scells)); i = 0; sc->sc_physaddr = reg[i++]; if (acells == 2) { sc->sc_physaddr <<= 32; sc->sc_physaddr |= reg[i++]; } sc->sc_size = reg[i++]; if (scells == 2) { sc->sc_size <<= 32; sc->sc_size |= reg[i++]; } sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "UniNorth Device Memory"; error = rman_init(&sc->sc_mem_rman); if (error) { device_printf(dev, "rman_init() failed. 
error = %d\n", error); return (error); } error = rman_manage_region(&sc->sc_mem_rman, sc->sc_physaddr, sc->sc_physaddr + sc->sc_size - 1); if (error) { device_printf(dev, "rman_manage_region() failed. error = %d\n", error); return (error); } if (unin_chip == NULL) unin_chip = dev; /* * Iterate through the sub-devices */ for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_UNIN, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->udi_obdinfo, child) != 0) { free(dinfo, M_UNIN); continue; } resource_list_init(&dinfo->udi_resources); dinfo->udi_ninterrupts = 0; unin_chip_add_intr(child, dinfo); /* * Some Apple machines do have a bug in OF, they miss * the interrupt entries on the U3 I2C node. That means they * do not have an entry with number of interrupts nor the * entry of the interrupt parent handle. * We define an interrupt and hardwire it to the /u3/mpic * handle. */ if (OF_getprop(child, "name", name, sizeof(name)) <= 0) device_printf(dev, "device has no name!\n"); if (dinfo->udi_ninterrupts == 0 && (strcmp(name, "i2c-bus") == 0 || strcmp(name, "i2c") == 0)) { if (OF_getprop(child, "interrupt-parent", &iparent, sizeof(iparent)) <= 0) { iparent = OF_finddevice("/u3/mpic"); device_printf(dev, "Set /u3/mpic as iparent!\n"); } /* Add an interrupt number 0 to the parent. */ irq = MAP_IRQ(iparent, 0); resource_list_add(&dinfo->udi_resources, SYS_RES_IRQ, dinfo->udi_ninterrupts, irq, irq, 1); dinfo->udi_interrupts[dinfo->udi_ninterrupts] = irq; dinfo->udi_ninterrupts++; } unin_chip_add_reg(child, dinfo); cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->udi_obdinfo.obd_name); resource_list_free(&dinfo->udi_resources); ofw_bus_gen_destroy_devinfo(&dinfo->udi_obdinfo); free(dinfo, M_UNIN); continue; } device_set_ivars(cdev, dinfo); } /* * Only map the first page, since that is where the registers * of interest lie. */ sc->sc_addr = (vm_offset_t)pmap_mapdev(sc->sc_physaddr, PAGE_SIZE); sc->sc_version = *(u_int *)sc->sc_addr; device_printf(dev, "Version %d\n", sc->sc_version); /* * Enable the GMAC Ethernet cell and the integrated OpenPIC * if Open Firmware says they are used. */ for (child = OF_child(root); child; child = OF_peer(child)) { memset(compat, 0, sizeof(compat)); OF_getprop(child, "compatible", compat, sizeof(compat)); if (strcmp(compat, "gmac") == 0) unin_enable_gmac(dev); if (strcmp(compat, "chrp,open-pic") == 0) unin_enable_mpic(dev); } /* * GMAC lives under the PCI bus, so just check if enet is gmac. 
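 * ("enet" is commonly the Open Firmware alias for the onboard
 * Ethernet node on these machines, so OF_finddevice() can resolve it
 * even though the node is not one of our direct children.)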
*/ child = OF_finddevice("enet"); memset(compat, 0, sizeof(compat)); OF_getprop(child, "compatible", compat, sizeof(compat)); if (strcmp(compat, "gmac") == 0) unin_enable_gmac(dev); return (bus_generic_attach(dev)); } static int unin_chip_print_child(device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->udi_resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static void unin_chip_probe_nomatch(device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; struct resource_list *rl; const char *type; if (bootverbose) { dinfo = device_get_ivars(child); rl = &dinfo->udi_resources; if ((type = ofw_bus_get_type(child)) == NULL) type = "(unknown)"; device_printf(dev, "<%s, %s>", type, ofw_bus_get_name(child)); resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); printf(" (no driver attached)\n"); } } static struct resource * unin_chip_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct unin_chip_softc *sc; int needactivate; struct resource *rv; struct rman *rm; u_long adjstart, adjend, adjcount; struct unin_chip_devinfo *dinfo; struct resource_list_entry *rle; sc = device_get_softc(bus); dinfo = device_get_ivars(child); needactivate = flags & RF_ACTIVE; flags &= ~RF_ACTIVE; switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rle = resource_list_find(&dinfo->udi_resources, SYS_RES_MEMORY, *rid); if (rle == NULL) { device_printf(bus, "no rle for %s memory %d\n", device_get_nameunit(child), *rid); return (NULL); } rle->end = rle->end - 1; /* Hack? */ if (start < rle->start) adjstart = rle->start; else if (start > rle->end) adjstart = rle->end; else adjstart = start; if (end < rle->start) adjend = rle->start; else if (end > rle->end) adjend = rle->end; else adjend = end; adjcount = adjend - adjstart; rm = &sc->sc_mem_rman; break; case SYS_RES_IRQ: /* Check for passthrough from subattachments. 
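 * That is, a device two or more levels below us (say, a function on a
 * child bus) has its IRQ request routed through this method;
 * device_get_parent(child) != bus detects the case, and the request is
 * forwarded unchanged to our own parent.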
*/ if (device_get_parent(child) != bus) return BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); rle = resource_list_find(&dinfo->udi_resources, SYS_RES_IRQ, *rid); if (rle == NULL) { if (dinfo->udi_ninterrupts >= 6) { device_printf(bus, "%s has more than 6 interrupts\n", device_get_nameunit(child)); return (NULL); } resource_list_add(&dinfo->udi_resources, SYS_RES_IRQ, dinfo->udi_ninterrupts, start, start, 1); dinfo->udi_interrupts[dinfo->udi_ninterrupts] = start; dinfo->udi_ninterrupts++; } return (resource_list_alloc(&dinfo->udi_resources, bus, child, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } rv = rman_reserve_resource(rm, adjstart, adjend, adjcount, flags, child); if (rv == NULL) { device_printf(bus, "failed to reserve resource %#lx - %#lx (%#lx)" " for %s\n", adjstart, adjend, adjcount, device_get_nameunit(child)); return (NULL); } rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv) != 0) { device_printf(bus, "failed to activate resource for %s\n", device_get_nameunit(child)); rman_release_resource(rv); return (NULL); } } return (rv); } static int unin_chip_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { if (rman_get_flags(res) & RF_ACTIVE) { int error = bus_deactivate_resource(child, type, rid, res); if (error) return error; } return (rman_release_resource(res)); } static int unin_chip_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { void *p; if (type == SYS_RES_IRQ) return (bus_activate_resource(bus, type, rid, res)); if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { vm_offset_t start; start = (vm_offset_t) rman_get_start(res); if (bootverbose) printf("unin mapdev: start %zx, len %jd\n", start, rman_get_size(res)); p = pmap_mapdev(start, (vm_size_t) rman_get_size(res)); if (p == NULL) return (ENOMEM); rman_set_virtual(res, p); rman_set_bustag(res, &bs_be_tag); rman_set_bushandle(res, (u_long)p); } return (rman_activate_resource(res)); } static int unin_chip_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { /* * If this is a memory resource, unmap it. 
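 * unin_chip_activate_resource() created the mapping with pmap_mapdev()
 * and recorded the kernel virtual address with rman_set_virtual(), so
 * the teardown below recovers the same address via rman_get_virtual()
 * and hands it back to pmap_unmapdev() along with the resource's size.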
*/ if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { u_int32_t psize; psize = rman_get_size(res); pmap_unmapdev((vm_offset_t)rman_get_virtual(res), psize); } return (rman_deactivate_resource(res)); } static struct resource_list * unin_chip_get_resource_list (device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->udi_resources); } static const struct ofw_bus_devinfo * unin_chip_get_devinfo(device_t dev, device_t child) { struct unin_chip_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->udi_obdinfo); } int unin_chip_wake(device_t dev) { if (dev == NULL) dev = unin_chip; unin_update_reg(dev, UNIN_PWR_MGMT, UNIN_PWR_NORMAL, UNIN_PWR_MASK); DELAY(10); unin_update_reg(dev, UNIN_HWINIT_STATE, UNIN_RUNNING, 0); DELAY(100); return (0); } int unin_chip_sleep(device_t dev, int idle) { if (dev == NULL) dev = unin_chip; unin_update_reg(dev, UNIN_HWINIT_STATE, UNIN_SLEEPING, 0); DELAY(10); if (idle) unin_update_reg(dev, UNIN_PWR_MGMT, UNIN_PWR_IDLE2, UNIN_PWR_MASK); else unin_update_reg(dev, UNIN_PWR_MGMT, UNIN_PWR_SLEEP, UNIN_PWR_MASK); DELAY(10); return (0); } diff --git a/sys/powerpc/powernv/opal_dev.c b/sys/powerpc/powernv/opal_dev.c index e23844fde04d..d07dea566303 100644 --- a/sys/powerpc/powernv/opal_dev.c +++ b/sys/powerpc/powernv/opal_dev.c @@ -1,438 +1,438 @@ /*- * Copyright (c) 2015 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "clock_if.h" #include "opal.h" static int opaldev_probe(device_t); static int opaldev_attach(device_t); /* clock interface */ static int opal_gettime(device_t dev, struct timespec *ts); static int opal_settime(device_t dev, struct timespec *ts); /* ofw bus interface */ static const struct ofw_bus_devinfo *opaldev_get_devinfo(device_t dev, device_t child); static void opal_shutdown(void *arg, int howto); static void opal_handle_shutdown_message(void *unused, struct opal_msg *msg); static void opal_intr(void *); static device_method_t opaldev_methods[] = { /* Device interface */ DEVMETHOD(device_probe, opaldev_probe), DEVMETHOD(device_attach, opaldev_attach), /* clock interface */ DEVMETHOD(clock_gettime, opal_gettime), DEVMETHOD(clock_settime, opal_settime), /* Bus interface */ - DEVMETHOD(bus_child_pnpinfo_str, ofw_bus_gen_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, opaldev_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), DEVMETHOD_END }; static driver_t opaldev_driver = { "opal", opaldev_methods, 0 }; static devclass_t opaldev_devclass; EARLY_DRIVER_MODULE(opaldev, ofwbus, opaldev_driver, opaldev_devclass, 0, 0, BUS_PASS_BUS); static void opal_heartbeat(void); static void opal_handle_messages(void); static struct proc *opal_hb_proc; static struct kproc_desc opal_heartbeat_kp = { "opal_heartbeat", opal_heartbeat, &opal_hb_proc }; SYSINIT(opal_heartbeat_setup, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kproc_start, &opal_heartbeat_kp); static int opal_heartbeat_ms; EVENTHANDLER_LIST_DEFINE(OPAL_ASYNC_COMP); EVENTHANDLER_LIST_DEFINE(OPAL_EPOW); EVENTHANDLER_LIST_DEFINE(OPAL_SHUTDOWN); EVENTHANDLER_LIST_DEFINE(OPAL_HMI_EVT); EVENTHANDLER_LIST_DEFINE(OPAL_DPO); EVENTHANDLER_LIST_DEFINE(OPAL_OCC); #define OPAL_SOFT_OFF 0 #define OPAL_SOFT_REBOOT 1 static void opal_heartbeat(void) { uint64_t events; if (opal_heartbeat_ms == 0) kproc_exit(0); while (1) { events = 0; /* Turn the OPAL state crank */ opal_call(OPAL_POLL_EVENTS, vtophys(&events)); if (be64toh(events) & OPAL_EVENT_MSG_PENDING) opal_handle_messages(); tsleep(opal_hb_proc, 0, "opal", MSEC_2_TICKS(opal_heartbeat_ms)); } } static int opaldev_probe(device_t dev) { phandle_t iparent; pcell_t *irqs; int i, n_irqs; if (!ofw_bus_is_compatible(dev, "ibm,opal-v3")) return (ENXIO); if (opal_check() != 0) return (ENXIO); device_set_desc(dev, "OPAL Abstraction Firmware"); /* Manually add IRQs before attaching */ if (OF_hasprop(ofw_bus_get_node(dev), "opal-interrupts")) { iparent = OF_finddevice("/interrupt-controller@0"); iparent = OF_xref_from_node(iparent); n_irqs = OF_getproplen(ofw_bus_get_node(dev), "opal-interrupts") / sizeof(*irqs); irqs = malloc(n_irqs * sizeof(*irqs), M_DEVBUF, M_WAITOK); OF_getencprop(ofw_bus_get_node(dev), "opal-interrupts", irqs, n_irqs * sizeof(*irqs)); for (i = 0; i < n_irqs; i++) bus_set_resource(dev, SYS_RES_IRQ, i, ofw_bus_map_intr(dev, iparent, 1, &irqs[i]), 1); free(irqs, M_DEVBUF); } return (BUS_PROBE_SPECIFIC); } static int opaldev_attach(device_t dev) { phandle_t child; device_t cdev; uint64_t junk; int i, 
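/*
 * For context on the heartbeat machinery set up above: OPAL firmware
 * only makes progress on queued work when the OS calls
 * OPAL_POLL_EVENTS, so opal_heartbeat() cranks it every
 * "ibm,heartbeat-ms" milliseconds, draining messages whenever the
 * returned event mask has OPAL_EVENT_MSG_PENDING set, and exits
 * immediately if the property is absent (opal_heartbeat_ms == 0).
 */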
rv; uint32_t async_count; struct ofw_bus_devinfo *dinfo; struct resource *irq; /* Test for RTC support and register clock if it works */ rv = opal_call(OPAL_RTC_READ, vtophys(&junk), vtophys(&junk)); do { rv = opal_call(OPAL_RTC_READ, vtophys(&junk), vtophys(&junk)); if (rv == OPAL_BUSY_EVENT) rv = opal_call(OPAL_POLL_EVENTS, 0); } while (rv == OPAL_BUSY_EVENT); if (rv == OPAL_SUCCESS) clock_register(dev, 2000); EVENTHANDLER_REGISTER(OPAL_SHUTDOWN, opal_handle_shutdown_message, NULL, EVENTHANDLER_PRI_ANY); EVENTHANDLER_REGISTER(shutdown_final, opal_shutdown, NULL, SHUTDOWN_PRI_LAST); OF_getencprop(ofw_bus_get_node(dev), "ibm,heartbeat-ms", &opal_heartbeat_ms, sizeof(opal_heartbeat_ms)); /* Bind to interrupts */ for (i = 0; (irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, RF_ACTIVE)) != NULL; i++) bus_setup_intr(dev, irq, INTR_TYPE_TTY | INTR_MPSAFE | INTR_ENTROPY, NULL, opal_intr, (void *)rman_get_start(irq), NULL); OF_getencprop(ofw_bus_get_node(dev), "opal-msg-async-num", &async_count, sizeof(async_count)); opal_init_async_tokens(async_count); for (child = OF_child(ofw_bus_get_node(dev)); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(dinfo, child) != 0) { free(dinfo, M_DEVBUF); continue; } cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->obd_name); ofw_bus_gen_destroy_devinfo(dinfo); free(dinfo, M_DEVBUF); continue; } device_set_ivars(cdev, dinfo); } return (bus_generic_attach(dev)); } static int bcd2bin32(int bcd) { int out = 0; out += bcd2bin(bcd & 0xff); out += 100*bcd2bin((bcd & 0x0000ff00) >> 8); out += 10000*bcd2bin((bcd & 0x00ff0000) >> 16); out += 1000000*bcd2bin((bcd & 0xffff0000) >> 24); return (out); } static int bin2bcd32(int bin) { int out = 0; int tmp; tmp = bin % 100; out += bin2bcd(tmp) * 0x1; bin = bin / 100; tmp = bin % 100; out += bin2bcd(tmp) * 0x100; bin = bin / 100; tmp = bin % 100; out += bin2bcd(tmp) * 0x10000; return (out); } static int opal_gettime(device_t dev, struct timespec *ts) { int rv; struct clocktime ct; uint32_t ymd; uint64_t hmsm; rv = opal_call(OPAL_RTC_READ, vtophys(&ymd), vtophys(&hmsm)); while (rv == OPAL_BUSY_EVENT) { opal_call(OPAL_POLL_EVENTS, 0); pause("opalrtc", 1); rv = opal_call(OPAL_RTC_READ, vtophys(&ymd), vtophys(&hmsm)); } if (rv != OPAL_SUCCESS) return (ENXIO); hmsm = be64toh(hmsm); ymd = be32toh(ymd); ct.nsec = bcd2bin32((hmsm & 0x000000ffffff0000) >> 16) * 1000; ct.sec = bcd2bin((hmsm & 0x0000ff0000000000) >> 40); ct.min = bcd2bin((hmsm & 0x00ff000000000000) >> 48); ct.hour = bcd2bin((hmsm & 0xff00000000000000) >> 56); ct.day = bcd2bin((ymd & 0x000000ff) >> 0); ct.mon = bcd2bin((ymd & 0x0000ff00) >> 8); ct.year = bcd2bin32((ymd & 0xffff0000) >> 16); return (clock_ct_to_ts(&ct, ts)); } static int opal_settime(device_t dev, struct timespec *ts) { int rv; struct clocktime ct; uint32_t ymd = 0; uint64_t hmsm = 0; clock_ts_to_ct(ts, &ct); ymd |= (uint32_t)bin2bcd(ct.day); ymd |= ((uint32_t)bin2bcd(ct.mon) << 8); ymd |= ((uint32_t)bin2bcd32(ct.year) << 16); hmsm |= ((uint64_t)bin2bcd32(ct.nsec/1000) << 16); hmsm |= ((uint64_t)bin2bcd(ct.sec) << 40); hmsm |= ((uint64_t)bin2bcd(ct.min) << 48); hmsm |= ((uint64_t)bin2bcd(ct.hour) << 56); /* * We do NOT swap endian here, because the values are being sent * via registers instead of indirect via memory. 
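 * As a concrete check of the packing used on both paths: 2021-04-22
 * 09:30:15.0 encodes as ymd = 0x20210422 (BCD year in the top sixteen
 * bits, then month and day) and hmsm = 0x0930150000000000 (BCD hour,
 * minute, second, then 24 bits of BCD microseconds starting at bit 16).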
*/ do { rv = opal_call(OPAL_RTC_WRITE, ymd, hmsm); if (rv == OPAL_BUSY_EVENT) { rv = opal_call(OPAL_POLL_EVENTS, 0); pause("opalrtc", 1); } } while (rv == OPAL_BUSY_EVENT); if (rv != OPAL_SUCCESS) return (ENXIO); return (0); } static const struct ofw_bus_devinfo * opaldev_get_devinfo(device_t dev, device_t child) { return (device_get_ivars(child)); } static void opal_shutdown(void *arg, int howto) { if (howto & RB_HALT) opal_call(OPAL_CEC_POWER_DOWN, 0 /* Normal power off */); else opal_call(OPAL_CEC_REBOOT); opal_call(OPAL_RETURN_CPU); } static void opal_handle_shutdown_message(void *unused, struct opal_msg *msg) { int howto; switch (be64toh(msg->params[0])) { case OPAL_SOFT_OFF: howto = RB_POWEROFF; break; case OPAL_SOFT_REBOOT: howto = RB_REROOT; break; } shutdown_nice(howto); } static void opal_handle_messages(void) { static struct opal_msg msg; uint64_t rv; uint32_t type; rv = opal_call(OPAL_GET_MSG, vtophys(&msg), sizeof(msg)); switch (rv) { case OPAL_SUCCESS: break; case OPAL_RESOURCE: /* no available messages - return */ return; case OPAL_PARAMETER: printf("%s error: invalid buffer. Please file a bug report.\n", __func__); return; case OPAL_PARTIAL: printf("%s error: buffer is too small and messages was discarded. Please file a bug report.\n", __func__); return; default: printf("%s opal_call returned unknown result <%lu>\n", __func__, rv); return; } type = be32toh(msg.msg_type); switch (type) { case OPAL_MSG_ASYNC_COMP: EVENTHANDLER_DIRECT_INVOKE(OPAL_ASYNC_COMP, &msg); break; case OPAL_MSG_EPOW: EVENTHANDLER_DIRECT_INVOKE(OPAL_EPOW, &msg); break; case OPAL_MSG_SHUTDOWN: EVENTHANDLER_DIRECT_INVOKE(OPAL_SHUTDOWN, &msg); break; case OPAL_MSG_HMI_EVT: EVENTHANDLER_DIRECT_INVOKE(OPAL_HMI_EVT, &msg); break; case OPAL_MSG_DPO: EVENTHANDLER_DIRECT_INVOKE(OPAL_DPO, &msg); break; case OPAL_MSG_OCC: EVENTHANDLER_DIRECT_INVOKE(OPAL_OCC, &msg); break; default: printf("%s Unknown OPAL message type %d\n", __func__, type); } } static void opal_intr(void *xintr) { uint64_t events = 0; opal_call(OPAL_HANDLE_INTERRUPT, (uint32_t)(uint64_t)xintr, vtophys(&events)); /* Wake up the heartbeat, if it's been setup. */ if (be64toh(events) != 0 && opal_hb_proc != NULL) wakeup(opal_hb_proc); } diff --git a/sys/powerpc/pseries/vdevice.c b/sys/powerpc/pseries/vdevice.c index d300b2e8457b..fa0fafb2d67d 100644 --- a/sys/powerpc/pseries/vdevice.c +++ b/sys/powerpc/pseries/vdevice.c @@ -1,218 +1,218 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iommu_if.h" static int vdevice_probe(device_t); static int vdevice_attach(device_t); static const struct ofw_bus_devinfo *vdevice_get_devinfo(device_t dev, device_t child); static int vdevice_print_child(device_t dev, device_t child); static struct resource_list *vdevice_get_resource_list(device_t, device_t); static bus_dma_tag_t vdevice_get_dma_tag(device_t dev, device_t child); /* * VDevice devinfo */ struct vdevice_devinfo { struct ofw_bus_devinfo mdi_obdinfo; struct resource_list mdi_resources; bus_dma_tag_t mdi_dma_tag; }; static device_method_t vdevice_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vdevice_probe), DEVMETHOD(device_attach, vdevice_attach), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), - DEVMETHOD(bus_child_pnpinfo_str, ofw_bus_gen_child_pnpinfo_str), + DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo), DEVMETHOD(bus_print_child, vdevice_print_child), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_get_resource_list, vdevice_get_resource_list), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, vdevice_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), /* IOMMU interface */ DEVMETHOD(bus_get_dma_tag, vdevice_get_dma_tag), DEVMETHOD(iommu_map, phyp_iommu_map), DEVMETHOD(iommu_unmap, phyp_iommu_unmap), DEVMETHOD_END }; static driver_t vdevice_driver = { "vdevice", vdevice_methods, 0 }; static devclass_t vdevice_devclass; DRIVER_MODULE(vdevice, ofwbus, vdevice_driver, vdevice_devclass, 0, 0); static int vdevice_probe(device_t dev) { const char *name; name = ofw_bus_get_name(dev); if (name == NULL || strcmp(name, "vdevice") != 0) return (ENXIO); if (!ofw_bus_is_compatible(dev, "IBM,vdevice")) return (ENXIO); device_set_desc(dev, "POWER Hypervisor Virtual Device Root"); return (0); } static int vdevice_attach(device_t dev) { phandle_t root, child; device_t cdev; struct vdevice_devinfo *dinfo; root = ofw_bus_get_node(dev); /* The XICP (root PIC) will handle all our interrupts */ powerpc_register_pic(root_pic, OF_xref_from_node(root), 1 << 24 /* 24-bit XIRR field */, 1 /* Number of IPIs */, FALSE); for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->mdi_obdinfo, child) != 0) { free(dinfo, M_DEVBUF); 
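/*
 * (This loop is the same child-enumeration idiom used by smu, unin and
 * opaldev earlier in this change: walk the OF children, build an
 * ofw_bus_devinfo per node, and attach it as ivars with
 * device_set_ivars() so that ofw_bus_get_devinfo, and through it
 * bus_child_pnpinfo, can find it later; on any failure the devinfo is
 * freed and the node skipped, as the surrounding error path does.)
 */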
continue; } resource_list_init(&dinfo->mdi_resources); ofw_bus_intr_to_rl(dev, child, &dinfo->mdi_resources, NULL); cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->mdi_obdinfo.obd_name); ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_DEVBUF); continue; } device_set_ivars(cdev, dinfo); } return (bus_generic_attach(dev)); } static const struct ofw_bus_devinfo * vdevice_get_devinfo(device_t dev, device_t child) { return (device_get_ivars(child)); } static int vdevice_print_child(device_t dev, device_t child) { struct vdevice_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->mdi_resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static struct resource_list * vdevice_get_resource_list (device_t dev, device_t child) { struct vdevice_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_resources); } static bus_dma_tag_t vdevice_get_dma_tag(device_t dev, device_t child) { struct vdevice_devinfo *dinfo; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); dinfo = device_get_ivars(child); if (dinfo->mdi_dma_tag == NULL) { bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &dinfo->mdi_dma_tag); phyp_iommu_set_dma_tag(dev, child, dinfo->mdi_dma_tag); } return (dinfo->mdi_dma_tag); } diff --git a/sys/sys/bus.h b/sys/sys/bus.h index 668201ffbcd5..c9ac84020ba5 100644 --- a/sys/sys/bus.h +++ b/sys/sys/bus.h @@ -1,978 +1,980 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997,1998,2003 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_BUS_H_ #define _SYS_BUS_H_ #include #include #include #include /** * @defgroup NEWBUS newbus - a generic framework for managing devices * @{ */ /** * @brief Interface information structure. 
*/ struct u_businfo { int ub_version; /**< @brief interface version */ #define BUS_USER_VERSION 2 int ub_generation; /**< @brief generation count */ }; /** * @brief State of the device. */ typedef enum device_state { DS_NOTPRESENT = 10, /**< @brief not probed or probe failed */ DS_ALIVE = 20, /**< @brief probe succeeded */ DS_ATTACHING = 25, /**< @brief currently attaching */ DS_ATTACHED = 30, /**< @brief attach method called */ DS_BUSY = 40 /**< @brief device is open */ } device_state_t; /** * @brief Device information exported to userspace. * The strings are placed one after the other, separated by NUL characters. * Fields should be added after the last one and order maintained for compatibility */ #define BUS_USER_BUFFER (3*1024) struct u_device { uintptr_t dv_handle; uintptr_t dv_parent; uint32_t dv_devflags; /**< @brief API Flags for device */ uint16_t dv_flags; /**< @brief flags for dev state */ device_state_t dv_state; /**< @brief State of attachment */ char dv_fields[BUS_USER_BUFFER]; /**< @brief NUL terminated fields */ /* name (name of the device in tree) */ /* desc (driver description) */ /* drivername (Name of driver without unit number) */ /* pnpinfo (Plug and play information from bus) */ /* location (Location of device on parent */ /* NUL */ }; /* Flags exported via dv_flags. */ #define DF_ENABLED 0x01 /* device should be probed/attached */ #define DF_FIXEDCLASS 0x02 /* devclass specified at create time */ #define DF_WILDCARD 0x04 /* unit was originally wildcard */ #define DF_DESCMALLOCED 0x08 /* description was malloced */ #define DF_QUIET 0x10 /* don't print verbose attach message */ #define DF_DONENOMATCH 0x20 /* don't execute DEVICE_NOMATCH again */ #define DF_EXTERNALSOFTC 0x40 /* softc not allocated by us */ #define DF_REBID 0x80 /* Can rebid after attach */ #define DF_SUSPENDED 0x100 /* Device is suspended. */ #define DF_QUIET_CHILDREN 0x200 /* Default to quiet for all my children */ #define DF_ATTACHED_ONCE 0x400 /* Has been attached at least once */ #define DF_NEEDNOMATCH 0x800 /* Has a pending NOMATCH event */ /** * @brief Device request structure used for ioctl's. * * Used for ioctl's on /dev/devctl2. All device ioctl's * must have parameter definitions which begin with dr_name. */ struct devreq_buffer { void *buffer; size_t length; }; struct devreq { char dr_name[128]; int dr_flags; /* request-specific flags */ union { struct devreq_buffer dru_buffer; void *dru_data; } dr_dru; #define dr_buffer dr_dru.dru_buffer /* variable-sized buffer */ #define dr_data dr_dru.dru_data /* fixed-size buffer */ }; #define DEV_ATTACH _IOW('D', 1, struct devreq) #define DEV_DETACH _IOW('D', 2, struct devreq) #define DEV_ENABLE _IOW('D', 3, struct devreq) #define DEV_DISABLE _IOW('D', 4, struct devreq) #define DEV_SUSPEND _IOW('D', 5, struct devreq) #define DEV_RESUME _IOW('D', 6, struct devreq) #define DEV_SET_DRIVER _IOW('D', 7, struct devreq) #define DEV_CLEAR_DRIVER _IOW('D', 8, struct devreq) #define DEV_RESCAN _IOW('D', 9, struct devreq) #define DEV_DELETE _IOW('D', 10, struct devreq) #define DEV_FREEZE _IOW('D', 11, struct devreq) #define DEV_THAW _IOW('D', 12, struct devreq) #define DEV_RESET _IOW('D', 13, struct devreq) /* Flags for DEV_DETACH and DEV_DISABLE. */ #define DEVF_FORCE_DETACH 0x0000001 /* Flags for DEV_SET_DRIVER. */ #define DEVF_SET_DRIVER_DETACH 0x0000001 /* Detach existing driver. */ /* Flags for DEV_CLEAR_DRIVER. */ #define DEVF_CLEAR_DRIVER_DETACH 0x0000001 /* Detach existing driver. */ /* Flags for DEV_DELETE. 
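 * All of these requests ride on struct devreq: userland puts the
 * device (or driver) name in dr_name and issues the ioctl on
 * /dev/devctl2; devctl(8) wraps them, so e.g. "devctl detach -f foo0"
 * should correspond to DEV_DETACH with dr_flags = DEVF_FORCE_DETACH.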
*/ #define DEVF_FORCE_DELETE 0x0000001 /* Flags for DEV_RESET */ #define DEVF_RESET_DETACH 0x0000001 /* Detach drivers vs suspend device */ #ifdef _KERNEL #include #include #include #include /** * Device name parsers. Hook to allow device enumerators to map * scheme-specific names to a device. */ typedef void (*dev_lookup_fn)(void *arg, const char *name, device_t *result); EVENTHANDLER_DECLARE(dev_lookup, dev_lookup_fn); /** * @brief A device driver. * * Provides an abstraction layer for driver dispatch. */ typedef struct kobj_class driver_t; /** * @brief A device class * * The devclass object has two main functions in the system. The first * is to manage the allocation of unit numbers for device instances * and the second is to hold the list of device drivers for a * particular bus type. Each devclass has a name and there cannot be * two devclasses with the same name. This ensures that unique unit * numbers are allocated to device instances. * * Drivers that support several different bus attachments (e.g. isa, * pci, pccard) should all use the same devclass to ensure that unit * numbers do not conflict. * * Each devclass may also have a parent devclass. This is used when * searching for device drivers to allow a form of inheritance. When * matching drivers with devices, first the driver list of the parent * device's devclass is searched. If no driver is found in that list, * the search continues in the parent devclass (if any). */ typedef struct devclass *devclass_t; /** * @brief A device method */ #define device_method_t kobj_method_t /** * @brief Driver interrupt filter return values * * If a driver provides an interrupt filter routine it must return an * integer consisting of oring together zero or more of the following * flags: * * FILTER_STRAY - this device did not trigger the interrupt * FILTER_HANDLED - the interrupt has been fully handled and can be EOId * FILTER_SCHEDULE_THREAD - the threaded interrupt handler should be * scheduled to execute * * If the driver does not provide a filter, then the interrupt code will * act is if the filter had returned FILTER_SCHEDULE_THREAD. Note that it * is illegal to specify any other flag with FILTER_STRAY and that it is * illegal to not specify either of FILTER_HANDLED or FILTER_SCHEDULE_THREAD * if FILTER_STRAY is not specified. */ #define FILTER_STRAY 0x01 #define FILTER_HANDLED 0x02 #define FILTER_SCHEDULE_THREAD 0x04 /** * @brief Driver interrupt service routines * * The filter routine is run in primary interrupt context and may not * block or use regular mutexes. It may only use spin mutexes for * synchronization. The filter may either completely handle the * interrupt or it may perform some of the work and defer more * expensive work to the regular interrupt handler. If a filter * routine is not registered by the driver, then the regular interrupt * handler is always used to handle interrupts from this device. * * The regular interrupt handler executes in its own thread context * and may use regular mutexes. However, it is prohibited from * sleeping on a sleep queue. */ typedef int driver_filter_t(void*); typedef void driver_intr_t(void*); /** * @brief Interrupt type bits. * * These flags are used both by newbus interrupt * registration (nexus.c) and also in struct intrec, which defines * interrupt properties. * * XXX We should probably revisit this and remove the vestiges of the * spls implicit in names like INTR_TYPE_TTY. In the meantime, don't * confuse things by renaming them (Grog, 18 July 2000). 
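 * A driver normally ORs one INTR_TYPE_* value with behaviour flags
 * when registering a handler; opaldev_attach() earlier in this change
 * passes INTR_TYPE_TTY | INTR_MPSAFE | INTR_ENTROPY, for instance.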
/**
 * @brief Interrupt type bits.
 *
 * These flags are used both by newbus interrupt
 * registration (nexus.c) and also in struct intrec, which defines
 * interrupt properties.
 *
 * XXX We should probably revisit this and remove the vestiges of the
 * spls implicit in names like INTR_TYPE_TTY.  In the meantime, don't
 * confuse things by renaming them (Grog, 18 July 2000).
 *
 * Buses which do interrupt remapping will want to change their type
 * to reflect what sort of devices are underneath.
 */
enum intr_type {
	INTR_TYPE_TTY = 1,
	INTR_TYPE_BIO = 2,
	INTR_TYPE_NET = 4,
	INTR_TYPE_CAM = 8,
	INTR_TYPE_MISC = 16,
	INTR_TYPE_CLK = 32,
	INTR_TYPE_AV = 64,
	INTR_EXCL = 256,		/* exclusive interrupt */
	INTR_MPSAFE = 512,		/* this interrupt is SMP safe */
	INTR_ENTROPY = 1024,		/* this interrupt provides entropy */
	INTR_MD1 = 4096,		/* flag reserved for MD use */
	INTR_MD2 = 8192,		/* flag reserved for MD use */
	INTR_MD3 = 16384,		/* flag reserved for MD use */
	INTR_MD4 = 32768		/* flag reserved for MD use */
};

enum intr_trigger {
	INTR_TRIGGER_INVALID = -1,
	INTR_TRIGGER_CONFORM = 0,
	INTR_TRIGGER_EDGE = 1,
	INTR_TRIGGER_LEVEL = 2
};

enum intr_polarity {
	INTR_POLARITY_CONFORM = 0,
	INTR_POLARITY_HIGH = 1,
	INTR_POLARITY_LOW = 2
};

/**
 * CPU sets supported by bus_get_cpus().  Note that not all sets may be
 * supported for a given device.  If a request is not supported by a
 * device (or its parents), then bus_get_cpus() will fail with EINVAL.
 */
enum cpu_sets {
	LOCAL_CPUS = 0,
	INTR_CPUS
};

typedef int (*devop_t)(void);

/**
 * @brief This structure is deprecated.
 *
 * Use the kobj(9) macro DEFINE_CLASS to
 * declare classes which implement device drivers.
 */
struct driver {
	KOBJ_CLASS_FIELDS;
};

/**
 * @brief A resource mapping.
 */
struct resource_map {
	bus_space_tag_t r_bustag;
	bus_space_handle_t r_bushandle;
	bus_size_t r_size;
	void	*r_vaddr;
};

/**
 * @brief Optional properties of a resource mapping request.
 */
struct resource_map_request {
	size_t	size;
	rman_res_t offset;
	rman_res_t length;
	vm_memattr_t memattr;
};

void	resource_init_map_request_impl(struct resource_map_request *_args,
	    size_t _sz);
#define	resource_init_map_request(rmr)					\
	resource_init_map_request_impl((rmr), sizeof(*(rmr)))
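/*
 * Example (illustrative sketch): requesting a write-combining mapping of the
 * first page of an already-allocated memory resource.  "sc_res"/"sc_map" are
 * hypothetical softc members; the resource is assumed to have been allocated
 * with RF_UNMAPPED so that the driver controls the mapping.
 */
#if 0
static int
foo_map_regs(device_t dev, struct foo_softc *sc)
{
	struct resource_map_request req;
	int error;

	resource_init_map_request(&req);
	req.offset = 0;
	req.length = PAGE_SIZE;
	req.memattr = VM_MEMATTR_WRITE_COMBINING;
	error = bus_map_resource(dev, SYS_RES_MEMORY, sc->sc_res, &req,
	    &sc->sc_map);
	if (error != 0)
		return (error);
	/* The map's tag/handle pair can now be used with bus_space(9). */
	bus_space_write_4(sc->sc_map.r_bustag, sc->sc_map.r_bushandle, 0, 1);
	return (0);
}
#endif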
/*
 * Definitions for drivers which need to keep simple lists of resources
 * for their child devices.
 */
struct resource;

/**
 * @brief An entry for a single resource in a resource list.
 */
struct resource_list_entry {
	STAILQ_ENTRY(resource_list_entry) link;
	int	type;		/**< @brief type argument to alloc_resource */
	int	rid;		/**< @brief resource identifier */
	int	flags;		/**< @brief resource flags */
	struct	resource *res;	/**< @brief the real resource when allocated */
	rman_res_t	start;	/**< @brief start of resource range */
	rman_res_t	end;	/**< @brief end of resource range */
	rman_res_t	count;	/**< @brief count within range */
};
STAILQ_HEAD(resource_list, resource_list_entry);

#define	RLE_RESERVED		0x0001	/* Reserved by the parent bus. */
#define	RLE_ALLOCATED		0x0002	/* Reserved resource is allocated. */
#define	RLE_PREFETCH		0x0004	/* Resource is a prefetch range. */

void	resource_list_init(struct resource_list *rl);
void	resource_list_free(struct resource_list *rl);
struct resource_list_entry *
	resource_list_add(struct resource_list *rl, int type, int rid,
	    rman_res_t start, rman_res_t end, rman_res_t count);
int	resource_list_add_next(struct resource_list *rl, int type,
	    rman_res_t start, rman_res_t end, rman_res_t count);
int	resource_list_busy(struct resource_list *rl, int type, int rid);
int	resource_list_reserved(struct resource_list *rl, int type, int rid);
struct resource_list_entry *
	resource_list_find(struct resource_list *rl, int type, int rid);
void	resource_list_delete(struct resource_list *rl, int type, int rid);
struct resource *
	resource_list_alloc(struct resource_list *rl, device_t bus,
	    device_t child, int type, int *rid, rman_res_t start,
	    rman_res_t end, rman_res_t count, u_int flags);
int	resource_list_release(struct resource_list *rl, device_t bus,
	    device_t child, int type, int rid, struct resource *res);
int	resource_list_release_active(struct resource_list *rl, device_t bus,
	    device_t child, int type);
struct resource *
	resource_list_reserve(struct resource_list *rl, device_t bus,
	    device_t child, int type, int *rid, rman_res_t start,
	    rman_res_t end, rman_res_t count, u_int flags);
int	resource_list_unreserve(struct resource_list *rl, device_t bus,
	    device_t child, int type, int rid);
void	resource_list_purge(struct resource_list *rl);
int	resource_list_print_type(struct resource_list *rl, const char *name,
	    int type, const char *format);
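/*
 * Example (illustrative sketch): how a bus driver might use a per-child
 * resource list.  "struct foo_devinfo", the hypothetical per-child ivars,
 * embeds the list; the bus records what the child decodes at enumeration
 * time and satisfies allocation requests from the list later.
 */
#if 0
struct foo_devinfo {
	struct resource_list fdi_rl;
};

static void
foobus_fill_ranges(device_t child, struct foo_devinfo *fdi)
{
	resource_list_init(&fdi->fdi_rl);
	/* The child decodes 32 bytes of I/O space at 0x300 and IRQ 5. */
	resource_list_add(&fdi->fdi_rl, SYS_RES_IOPORT, 0, 0x300, 0x31f, 32);
	resource_list_add(&fdi->fdi_rl, SYS_RES_IRQ, 0, 5, 5, 1);
}

static struct resource *
foobus_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct foo_devinfo *fdi = device_get_ivars(child);

	return (resource_list_alloc(&fdi->fdi_rl, bus, child, type, rid,
	    start, end, count, flags));
}
#endif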
/*
 * The root bus, to which all top-level buses are attached.
 */
extern device_t root_bus;
extern devclass_t root_devclass;
void	root_bus_configure(void);

/*
 * Useful functions for implementing buses.
 */

struct _cpuset;

int	bus_generic_activate_resource(device_t dev, device_t child, int type,
	    int rid, struct resource *r);
device_t
	bus_generic_add_child(device_t dev, u_int order, const char *name,
	    int unit);
int	bus_generic_adjust_resource(device_t bus, device_t child, int type,
	    struct resource *r, rman_res_t start, rman_res_t end);
struct resource *
	bus_generic_alloc_resource(device_t bus, device_t child, int type,
	    int *rid, rman_res_t start, rman_res_t end, rman_res_t count,
	    u_int flags);
int	bus_generic_translate_resource(device_t dev, int type,
	    rman_res_t start, rman_res_t *newstart);
int	bus_generic_attach(device_t dev);
int	bus_generic_bind_intr(device_t dev, device_t child,
	    struct resource *irq, int cpu);
+int	bus_generic_child_location(device_t dev, device_t child,
+	    struct sbuf *sb);
+int	bus_generic_child_pnpinfo(device_t dev, device_t child,
+	    struct sbuf *sb);
int	bus_generic_child_present(device_t dev, device_t child);
int	bus_generic_config_intr(device_t, int, enum intr_trigger,
	    enum intr_polarity);
int	bus_generic_describe_intr(device_t dev, device_t child,
	    struct resource *irq, void *cookie, const char *descr);
int	bus_generic_deactivate_resource(device_t dev, device_t child, int type,
	    int rid, struct resource *r);
int	bus_generic_detach(device_t dev);
void	bus_generic_driver_added(device_t dev, driver_t *driver);
int	bus_generic_get_cpus(device_t dev, device_t child, enum cpu_sets op,
	    size_t setsize, struct _cpuset *cpuset);
bus_dma_tag_t
	bus_generic_get_dma_tag(device_t dev, device_t child);
bus_space_tag_t
	bus_generic_get_bus_tag(device_t dev, device_t child);
int	bus_generic_get_domain(device_t dev, device_t child, int *domain);
struct resource_list *
	bus_generic_get_resource_list(device_t, device_t);
int	bus_generic_map_resource(device_t dev, device_t child, int type,
	    struct resource *r, struct resource_map_request *args,
	    struct resource_map *map);
void	bus_generic_new_pass(device_t dev);
int	bus_print_child_header(device_t dev, device_t child);
int	bus_print_child_domain(device_t dev, device_t child);
int	bus_print_child_footer(device_t dev, device_t child);
int	bus_generic_print_child(device_t dev, device_t child);
int	bus_generic_probe(device_t dev);
int	bus_generic_read_ivar(device_t dev, device_t child, int which,
	    uintptr_t *result);
int	bus_generic_release_resource(device_t bus, device_t child,
	    int type, int rid, struct resource *r);
int	bus_generic_resume(device_t dev);
int	bus_generic_resume_child(device_t dev, device_t child);
int	bus_generic_setup_intr(device_t dev, device_t child,
	    struct resource *irq, int flags, driver_filter_t *filter,
	    driver_intr_t *intr, void *arg, void **cookiep);

struct resource *
	bus_generic_rl_alloc_resource(device_t, device_t, int, int *,
	    rman_res_t, rman_res_t, rman_res_t, u_int);
void	bus_generic_rl_delete_resource(device_t, device_t, int, int);
int	bus_generic_rl_get_resource(device_t, device_t, int, int,
	    rman_res_t *, rman_res_t *);
int	bus_generic_rl_set_resource(device_t, device_t, int, int,
	    rman_res_t, rman_res_t);
int	bus_generic_rl_release_resource(device_t, device_t, int, int,
	    struct resource *);

int	bus_generic_shutdown(device_t dev);
int	bus_generic_suspend(device_t dev);
int	bus_generic_suspend_child(device_t dev, device_t child);
int	bus_generic_teardown_intr(device_t dev, device_t child,
	    struct resource *irq, void *cookie);
int	bus_generic_suspend_intr(device_t dev, device_t child,
	    struct resource *irq);
int	bus_generic_resume_intr(device_t dev, device_t child,
	    struct resource *irq);
int	bus_generic_unmap_resource(device_t dev, device_t child, int type,
	    struct resource *r, struct resource_map *map);
int	bus_generic_write_ivar(device_t dev, device_t child, int which,
	    uintptr_t value);
int	bus_helper_reset_post(device_t dev, int flags);
int	bus_helper_reset_prepare(device_t dev, int flags);
int	bus_null_rescan(device_t dev);
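/*
 * Example (illustrative sketch): a bus driver's method table delegating most
 * of its work to the generic implementations above.  The "foobus" names are
 * hypothetical; DEVMETHOD, DEVMETHOD_END, and DRIVER_MODULE are defined
 * later in this header.
 */
#if 0
static device_method_t foobus_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		foobus_probe),
	DEVMETHOD(device_attach,	foobus_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),

	/* Bus interface */
	DEVMETHOD(bus_alloc_resource,	bus_generic_alloc_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
	DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),
	DEVMETHOD(bus_child_pnpinfo,	bus_generic_child_pnpinfo),

	DEVMETHOD_END
};

static driver_t foobus_driver = {
	"foobus",
	foobus_methods,
	sizeof(struct foobus_softc)
};
static devclass_t foobus_devclass;
DRIVER_MODULE(foobus, pci, foobus_driver, foobus_devclass, NULL, NULL);
#endif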
/*
 * Wrapper functions for the BUS_*_RESOURCE methods to make client code
 * a little simpler.
 */

struct resource_spec {
	int	type;
	int	rid;
	int	flags;
};
#define	RESOURCE_SPEC_END	{-1, 0, 0}

int	bus_alloc_resources(device_t dev, struct resource_spec *rs,
	    struct resource **res);
void	bus_release_resources(device_t dev, const struct resource_spec *rs,
	    struct resource **res);

int	bus_adjust_resource(device_t child, int type, struct resource *r,
	    rman_res_t start, rman_res_t end);
struct resource *
	bus_alloc_resource(device_t dev, int type, int *rid,
	    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags);
int	bus_activate_resource(device_t dev, int type, int rid,
	    struct resource *r);
int	bus_deactivate_resource(device_t dev, int type, int rid,
	    struct resource *r);
int	bus_map_resource(device_t dev, int type, struct resource *r,
	    struct resource_map_request *args, struct resource_map *map);
int	bus_unmap_resource(device_t dev, int type, struct resource *r,
	    struct resource_map *map);
int	bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize,
	    struct _cpuset *cpuset);
bus_dma_tag_t bus_get_dma_tag(device_t dev);
bus_space_tag_t bus_get_bus_tag(device_t dev);
int	bus_get_domain(device_t dev, int *domain);
int	bus_release_resource(device_t dev, int type, int rid,
	    struct resource *r);
int	bus_free_resource(device_t dev, int type, struct resource *r);
int	bus_setup_intr(device_t dev, struct resource *r, int flags,
	    driver_filter_t filter, driver_intr_t handler, void *arg,
	    void **cookiep);
int	bus_teardown_intr(device_t dev, struct resource *r, void *cookie);
int	bus_suspend_intr(device_t dev, struct resource *r);
int	bus_resume_intr(device_t dev, struct resource *r);
int	bus_bind_intr(device_t dev, struct resource *r, int cpu);
int	bus_describe_intr(device_t dev, struct resource *irq, void *cookie,
	    const char *fmt, ...) __printflike(4, 5);
int	bus_set_resource(device_t dev, int type, int rid,
	    rman_res_t start, rman_res_t count);
int	bus_get_resource(device_t dev, int type, int rid,
	    rman_res_t *startp, rman_res_t *countp);
rman_res_t	bus_get_resource_start(device_t dev, int type, int rid);
rman_res_t	bus_get_resource_count(device_t dev, int type, int rid);
void	bus_delete_resource(device_t dev, int type, int rid);
int	bus_child_present(device_t child);
-int	bus_child_pnpinfo_str(device_t child, char *buf, size_t buflen);
-int	bus_child_location_str(device_t child, char *buf, size_t buflen);
+int	bus_child_pnpinfo(device_t child, struct sbuf *sb);
+int	bus_child_location(device_t child, struct sbuf *sb);
void	bus_enumerate_hinted_children(device_t bus);
int	bus_delayed_attach_children(device_t bus);

static __inline struct resource *
bus_alloc_resource_any(device_t dev, int type, int *rid, u_int flags)
{
	return (bus_alloc_resource(dev, type, rid, 0, ~0, 1, flags));
}

static __inline struct resource *
bus_alloc_resource_anywhere(device_t dev, int type, int *rid,
    rman_res_t count, u_int flags)
{
	return (bus_alloc_resource(dev, type, rid, 0, ~0, count, flags));
}
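/*
 * Example (illustrative sketch): allocating a driver's resources in one shot
 * with a resource_spec table and wiring up the filter/handler pair sketched
 * earlier.  The "foo" names are hypothetical.
 */
#if 0
static struct resource_spec foo_res_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* register BAR */
	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_SHAREABLE },
	RESOURCE_SPEC_END
};

static int
foo_attach(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);
	int error;

	error = bus_alloc_resources(dev, foo_res_spec, sc->sc_res);
	if (error != 0)
		return (error);
	/*
	 * bus_read_4() is one of the generated shorthands defined at the
	 * end of this header.
	 */
	device_printf(dev, "revision %#x\n", bus_read_4(sc->sc_res[0], 0));
	error = bus_setup_intr(dev, sc->sc_res[1],
	    INTR_TYPE_MISC | INTR_MPSAFE, foo_intr_filter, foo_intr, sc,
	    &sc->sc_ih);
	if (error != 0)
		bus_release_resources(dev, foo_res_spec, sc->sc_res);
	return (error);
}
#endif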
/*
 * Access functions for device.
 */
device_t
	device_add_child(device_t dev, const char *name, int unit);
device_t
	device_add_child_ordered(device_t dev, u_int order, const char *name,
	    int unit);
void	device_busy(device_t dev);
int	device_delete_child(device_t dev, device_t child);
int	device_delete_children(device_t dev);
int	device_attach(device_t dev);
int	device_detach(device_t dev);
void	device_disable(device_t dev);
void	device_enable(device_t dev);
device_t
	device_find_child(device_t dev, const char *classname, int unit);
const char *device_get_desc(device_t dev);
devclass_t device_get_devclass(device_t dev);
driver_t *device_get_driver(device_t dev);
u_int32_t device_get_flags(device_t dev);
device_t device_get_parent(device_t dev);
int	device_get_children(device_t dev, device_t **listp, int *countp);
void	*device_get_ivars(device_t dev);
void	device_set_ivars(device_t dev, void *ivars);
const char *device_get_name(device_t dev);
const char *device_get_nameunit(device_t dev);
void	*device_get_softc(device_t dev);
device_state_t device_get_state(device_t dev);
int	device_get_unit(device_t dev);
struct sysctl_ctx_list *device_get_sysctl_ctx(device_t dev);
struct sysctl_oid *device_get_sysctl_tree(device_t dev);
int	device_has_quiet_children(device_t dev);
int	device_is_alive(device_t dev);		/* did probe succeed? */
int	device_is_attached(device_t dev);	/* did attach succeed? */
int	device_is_enabled(device_t dev);
int	device_is_suspended(device_t dev);
int	device_is_quiet(device_t dev);
device_t device_lookup_by_name(const char *name);
int	device_print_prettyname(device_t dev);
int	device_printf(device_t dev, const char *, ...) __printflike(2, 3);
int	device_log(device_t dev, int pri, const char *, ...)
	    __printflike(3, 4);
int	device_probe(device_t dev);
int	device_probe_and_attach(device_t dev);
int	device_probe_child(device_t bus, device_t dev);
int	device_quiesce(device_t dev);
void	device_quiet(device_t dev);
void	device_quiet_children(device_t dev);
void	device_set_desc(device_t dev, const char *desc);
void	device_set_desc_copy(device_t dev, const char *desc);
int	device_set_devclass(device_t dev, const char *classname);
int	device_set_devclass_fixed(device_t dev, const char *classname);
bool	device_is_devclass_fixed(device_t dev);
int	device_set_driver(device_t dev, driver_t *driver);
void	device_set_flags(device_t dev, u_int32_t flags);
void	device_set_softc(device_t dev, void *softc);
void	device_free_softc(void *softc);
void	device_claim_softc(device_t dev);
int	device_set_unit(device_t dev, int unit);	/* XXX DONT USE XXX */
int	device_shutdown(device_t dev);
void	device_unbusy(device_t dev);
void	device_verbose(device_t dev);
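/*
 * Example (illustrative sketch): a bus implementing the sbuf-based
 * BUS_CHILD_PNPINFO method whose wrapper is declared above.  The
 * "foo_devinfo" fields are hypothetical; the key=value output format is
 * described in BUS_CHILD_PNPINFO(9).
 */
#if 0
static int
foobus_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb)
{
	struct foo_devinfo *fdi = device_get_ivars(child);

	sbuf_printf(sb, "vendor=0x%04x device=0x%04x rev=0x%02x",
	    fdi->fdi_vendor, fdi->fdi_device, fdi->fdi_rev);
	return (0);
}
#endif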
/*
 * Access functions for devclass.
 */
int	devclass_add_driver(devclass_t dc, driver_t *driver, int pass,
	    devclass_t *dcp);
devclass_t devclass_create(const char *classname);
int	devclass_delete_driver(devclass_t busclass, driver_t *driver);
devclass_t devclass_find(const char *classname);
const char *devclass_get_name(devclass_t dc);
device_t devclass_get_device(devclass_t dc, int unit);
void	*devclass_get_softc(devclass_t dc, int unit);
int	devclass_get_devices(devclass_t dc, device_t **listp, int *countp);
int	devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp);
int	devclass_get_count(devclass_t dc);
int	devclass_get_maxunit(devclass_t dc);
int	devclass_find_free_unit(devclass_t dc, int unit);
void	devclass_set_parent(devclass_t dc, devclass_t pdc);
devclass_t devclass_get_parent(devclass_t dc);
struct sysctl_ctx_list *devclass_get_sysctl_ctx(devclass_t dc);
struct sysctl_oid *devclass_get_sysctl_tree(devclass_t dc);

/*
 * Access functions for device resources.
 */
int	resource_int_value(const char *name, int unit, const char *resname,
	    int *result);
int	resource_long_value(const char *name, int unit, const char *resname,
	    long *result);
int	resource_string_value(const char *name, int unit, const char *resname,
	    const char **result);
int	resource_disabled(const char *name, int unit);
int	resource_find_match(int *anchor, const char **name, int *unit,
	    const char *resname, const char *value);
int	resource_find_dev(int *anchor, const char *name, int *unit,
	    const char *resname, const char *value);
int	resource_unset_value(const char *name, int unit, const char *resname);
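/*
 * Example (illustrative sketch): consulting device.hints(5) values through
 * the accessors above.  With hint.foo.0.port="0x300" in /boot/device.hints,
 * a hypothetical "foo" identify routine can decide whether to add a child.
 */
#if 0
static void
foo_identify(driver_t *driver, device_t parent)
{
	int port;

	if (resource_disabled("foo", 0))
		return;
	if (resource_int_value("foo", 0, "port", &port) == 0)
		BUS_ADD_CHILD(parent, 0, "foo", 0);
}
#endif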
/*
 * Functions for maintaining and checking consistency of
 * bus information exported to userspace.
 */
int	bus_data_generation_check(int generation);
void	bus_data_generation_update(void);

/**
 * Some convenience defines for probe routines to return.  These are just
 * suggested values, and there's nothing magical about them.
 * BUS_PROBE_SPECIFIC is for devices that cannot be reprobed, and that no
 * possible other driver may exist (typically legacy drivers that don't
 * follow all the rules, or special needs drivers).  BUS_PROBE_VENDOR is the
 * suggested value that vendor supplied drivers use.  This is for source or
 * binary drivers that are not yet integrated into the FreeBSD tree.  Its use
 * in the base OS is prohibited.  BUS_PROBE_DEFAULT is the normal return value
 * for drivers to use.  It is intended that nearly all of the drivers in the
 * tree should return this value.  BUS_PROBE_LOW_PRIORITY is for drivers that
 * have special requirements like when there are two drivers that support
 * overlapping series of hardware devices.  In this case the one that supports
 * the older part of the line would return this value, while the one that
 * supports the newer ones would return BUS_PROBE_DEFAULT.  BUS_PROBE_GENERIC
 * is for drivers that wish to have a generic form and a specialized form,
 * like is done with the pci bus and the acpi pci bus.  BUS_PROBE_HOOVER is
 * for those buses that implement a generic device placeholder for devices on
 * the bus that have no more specific driver for them (aka ugen).
 * BUS_PROBE_NOWILDCARD or lower means that the device isn't really bidding
 * for a device node, but accepts only devices that its parent has told it to
 * use this driver.
 */
#define	BUS_PROBE_SPECIFIC	0	/* Only I can use this device */
#define	BUS_PROBE_VENDOR	(-10)	/* Vendor supplied driver */
#define	BUS_PROBE_DEFAULT	(-20)	/* Base OS default driver */
#define	BUS_PROBE_LOW_PRIORITY	(-40)	/* Older, less desirable drivers */
#define	BUS_PROBE_GENERIC	(-100)	/* generic driver for dev */
#define	BUS_PROBE_HOOVER	(-1000000) /* Driver for any dev on bus */
#define	BUS_PROBE_NOWILDCARD	(-2000000000) /* No wildcard device matches */
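/*
 * Example (illustrative sketch): a typical probe method returning one of the
 * values above.  The "foo" ID-matching helper is hypothetical.
 */
#if 0
static int
foo_probe(device_t dev)
{
	if (!foo_match_ids(dev))
		return (ENXIO);			/* not our hardware */
	device_set_desc(dev, "Foo example controller");
	return (BUS_PROBE_DEFAULT);		/* an ordinary driver bid */
}
#endif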
/**
 * During boot, the device tree is scanned multiple times.  Each scan,
 * or pass, drivers may be attached to devices.  Each driver
 * attachment is assigned a pass number.  Drivers may only probe and
 * attach to devices if their pass number is less than or equal to the
 * current system-wide pass number.  The default pass is the last pass
 * and is used by most drivers.  Drivers needed by the scheduler are
 * probed in earlier passes.
 */
#define	BUS_PASS_ROOT		0	/* Used to attach root0. */
#define	BUS_PASS_BUS		10	/* Buses and bridges. */
#define	BUS_PASS_CPU		20	/* CPU devices. */
#define	BUS_PASS_RESOURCE	30	/* Resource discovery. */
#define	BUS_PASS_INTERRUPT	40	/* Interrupt controllers. */
#define	BUS_PASS_TIMER		50	/* Timers and clocks. */
#define	BUS_PASS_SCHEDULER	60	/* Start scheduler. */
#define	BUS_PASS_SUPPORTDEV	100000	/* Drivers which support DEFAULT drivers. */
#define	BUS_PASS_DEFAULT	__INT_MAX /* Everything else. */

#define	BUS_PASS_ORDER_FIRST	0
#define	BUS_PASS_ORDER_EARLY	2
#define	BUS_PASS_ORDER_MIDDLE	5
#define	BUS_PASS_ORDER_LATE	7
#define	BUS_PASS_ORDER_LAST	9

extern int bus_current_pass;

void	bus_set_pass(int pass);

/**
 * Shorthands for constructing method tables.
 */
#define	DEVMETHOD	KOBJMETHOD
#define	DEVMETHOD_END	KOBJMETHOD_END

/*
 * Some common device interfaces.
 */
#include "device_if.h"
#include "bus_if.h"

struct module;

int	driver_module_handler(struct module *, int, void *);

/**
 * Module support for automatically adding drivers to buses.
 */
struct driver_module_data {
	int		(*dmd_chainevh)(struct module *, int, void *);
	void		*dmd_chainarg;
	const char	*dmd_busname;
	kobj_class_t	dmd_driver;
	devclass_t	*dmd_devclass;
	int		dmd_pass;
};

#define	EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, devclass,	\
    evh, arg, order, pass)						\
									\
static struct driver_module_data name##_##busname##_driver_mod = {	\
	evh, arg,							\
	#busname,							\
	(kobj_class_t) &driver,						\
	&devclass,							\
	pass								\
};									\
									\
static moduledata_t name##_##busname##_mod = {				\
	#busname "/" #name,						\
	driver_module_handler,						\
	&name##_##busname##_driver_mod					\
};									\
DECLARE_MODULE(name##_##busname, name##_##busname##_mod,		\
	       SI_SUB_DRIVERS, order)

#define	EARLY_DRIVER_MODULE(name, busname, driver, devclass, evh, arg, pass) \
	EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, devclass,	\
	    evh, arg, SI_ORDER_MIDDLE, pass)

#define	DRIVER_MODULE_ORDERED(name, busname, driver, devclass, evh, arg, \
    order)								\
	EARLY_DRIVER_MODULE_ORDERED(name, busname, driver, devclass,	\
	    evh, arg, order, BUS_PASS_DEFAULT)

#define	DRIVER_MODULE(name, busname, driver, devclass, evh, arg)	\
	EARLY_DRIVER_MODULE(name, busname, driver, devclass, evh, arg,	\
	    BUS_PASS_DEFAULT)

/**
 * Generic ivar accessor generation macros for bus drivers
 */
#define	__BUS_ACCESSOR(varp, var, ivarp, ivar, type)			\
									\
static __inline type varp ## _get_ ## var(device_t dev)		\
{									\
	uintptr_t v;							\
	int e;								\
	e = BUS_READ_IVAR(device_get_parent(dev), dev,			\
	    ivarp ## _IVAR_ ## ivar, &v);				\
	KASSERT(e == 0, ("%s failed for %s on bus %s, error = %d",	\
	    __func__, device_get_nameunit(dev),				\
	    device_get_nameunit(device_get_parent(dev)), e));		\
	return ((type) v);						\
}									\
									\
static __inline void varp ## _set_ ## var(device_t dev, type t)		\
{									\
	uintptr_t v = (uintptr_t) t;					\
	int e;								\
	e = BUS_WRITE_IVAR(device_get_parent(dev), dev,			\
	    ivarp ## _IVAR_ ## ivar, v);				\
	KASSERT(e == 0, ("%s failed for %s on bus %s, error = %d",	\
	    __func__, device_get_nameunit(dev),				\
	    device_get_nameunit(device_get_parent(dev)), e));		\
}
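/*
 * Example (illustrative sketch): instantiating the accessors for a
 * hypothetical "foo" bus with a FOO_IVAR_PORT instance variable.  This
 * expands to foo_get_port() and foo_set_port() wrappers around
 * BUS_READ_IVAR()/BUS_WRITE_IVAR().
 */
#if 0
enum foo_device_ivars {
	FOO_IVAR_PORT,
};

__BUS_ACCESSOR(foo, port, FOO, PORT, int)
#endif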
/**
 * Shorthand macros, taking resource argument
 * Generated with sys/tools/bus_macro.sh
 */

#define	bus_barrier(r, o, l, f) \
	bus_space_barrier((r)->r_bustag, (r)->r_bushandle, (o), (l), (f))
#define	bus_poke_1(r, o, v) \
	bus_space_poke_1((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_peek_1(r, o, vp) \
	bus_space_peek_1((r)->r_bustag, (r)->r_bushandle, (o), (vp))
#define	bus_read_1(r, o) \
	bus_space_read_1((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_1(r, o, d, c) \
	bus_space_read_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_1(r, o, d, c) \
	bus_space_read_region_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_1(r, o, v, c) \
	bus_space_set_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_1(r, o, v, c) \
	bus_space_set_region_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_1(r, o, v) \
	bus_space_write_1((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_1(r, o, d, c) \
	bus_space_write_multi_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_1(r, o, d, c) \
	bus_space_write_region_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_stream_1(r, o) \
	bus_space_read_stream_1((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_stream_1(r, o, d, c) \
	bus_space_read_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_stream_1(r, o, d, c) \
	bus_space_read_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_stream_1(r, o, v, c) \
	bus_space_set_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_stream_1(r, o, v, c) \
	bus_space_set_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_stream_1(r, o, v) \
	bus_space_write_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_stream_1(r, o, d, c) \
	bus_space_write_multi_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_stream_1(r, o, d, c) \
	bus_space_write_region_stream_1((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_poke_2(r, o, v) \
	bus_space_poke_2((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_peek_2(r, o, vp) \
	bus_space_peek_2((r)->r_bustag, (r)->r_bushandle, (o), (vp))
#define	bus_read_2(r, o) \
	bus_space_read_2((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_2(r, o, d, c) \
	bus_space_read_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_2(r, o, d, c) \
	bus_space_read_region_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_2(r, o, v, c) \
	bus_space_set_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_2(r, o, v, c) \
	bus_space_set_region_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_2(r, o, v) \
	bus_space_write_2((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_2(r, o, d, c) \
	bus_space_write_multi_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_2(r, o, d, c) \
	bus_space_write_region_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_stream_2(r, o) \
	bus_space_read_stream_2((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_stream_2(r, o, d, c) \
	bus_space_read_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_stream_2(r, o, d, c) \
	bus_space_read_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_stream_2(r, o, v, c) \
	bus_space_set_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_stream_2(r, o, v, c) \
	bus_space_set_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_stream_2(r, o, v) \
	bus_space_write_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_stream_2(r, o, d, c) \
	bus_space_write_multi_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_stream_2(r, o, d, c) \
	bus_space_write_region_stream_2((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_poke_4(r, o, v) \
	bus_space_poke_4((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_peek_4(r, o, vp) \
	bus_space_peek_4((r)->r_bustag, (r)->r_bushandle, (o), (vp))
#define	bus_read_4(r, o) \
	bus_space_read_4((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_4(r, o, d, c) \
	bus_space_read_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_4(r, o, d, c) \
	bus_space_read_region_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_4(r, o, v, c) \
	bus_space_set_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_4(r, o, v, c) \
	bus_space_set_region_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_4(r, o, v) \
	bus_space_write_4((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_4(r, o, d, c) \
	bus_space_write_multi_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_4(r, o, d, c) \
	bus_space_write_region_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_stream_4(r, o) \
	bus_space_read_stream_4((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_stream_4(r, o, d, c) \
	bus_space_read_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_stream_4(r, o, d, c) \
	bus_space_read_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_stream_4(r, o, v, c) \
	bus_space_set_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_stream_4(r, o, v, c) \
	bus_space_set_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_stream_4(r, o, v) \
	bus_space_write_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_stream_4(r, o, d, c) \
	bus_space_write_multi_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_stream_4(r, o, d, c) \
	bus_space_write_region_stream_4((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_poke_8(r, o, v) \
	bus_space_poke_8((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_peek_8(r, o, vp) \
	bus_space_peek_8((r)->r_bustag, (r)->r_bushandle, (o), (vp))
#define	bus_read_8(r, o) \
	bus_space_read_8((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_8(r, o, d, c) \
	bus_space_read_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_8(r, o, d, c) \
	bus_space_read_region_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_8(r, o, v, c) \
	bus_space_set_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_8(r, o, v, c) \
	bus_space_set_region_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_8(r, o, v) \
	bus_space_write_8((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_8(r, o, d, c) \
	bus_space_write_multi_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_8(r, o, d, c) \
	bus_space_write_region_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_stream_8(r, o) \
	bus_space_read_stream_8((r)->r_bustag, (r)->r_bushandle, (o))
#define	bus_read_multi_stream_8(r, o, d, c) \
	bus_space_read_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_read_region_stream_8(r, o, d, c) \
	bus_space_read_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_set_multi_stream_8(r, o, v, c) \
	bus_space_set_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_set_region_stream_8(r, o, v, c) \
	bus_space_set_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v), (c))
#define	bus_write_stream_8(r, o, v) \
	bus_space_write_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (v))
#define	bus_write_multi_stream_8(r, o, d, c) \
	bus_space_write_multi_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))
#define	bus_write_region_stream_8(r, o, d, c) \
	bus_space_write_region_stream_8((r)->r_bustag, (r)->r_bushandle, (o), (d), (c))

#endif /* _KERNEL */

#endif /* !_SYS_BUS_H_ */