Index: arm/broadcom/bcm2835/bcm2835_spi.c
===================================================================
--- arm/broadcom/bcm2835/bcm2835_spi.c
+++ arm/broadcom/bcm2835/bcm2835_spi.c
@@ -107,6 +107,25 @@
 	BCM_SPI_WRITE(sc, off, reg);
 }
 
+/*
+ * Set the clock speed register and return the clock speed actually used,
+ * after corrections to fit within SPI_CORE_CLK.
+ */
+static uint32_t
+bcm_spi_set_clock_speed(struct bcm_spi_softc *sc, const uint32_t clock_speed)
+{
+	uint32_t clk = SPI_CORE_CLK / clock_speed;
+
+	if (clk <= 1)
+		clk = 2;
+	else if (clk % 2)
+		clk--;
+	if (clk > 0xffff)
+		clk = 0;
+	BCM_SPI_WRITE(sc, SPI_CLK, clk);
+	return (clk == 0 ? 0 : SPI_CORE_CLK / clk);
+}
+
 static int
 bcm_spi_clock_proc(SYSCTL_HANDLER_ARGS)
 {
@@ -117,26 +136,19 @@
 	sc = (struct bcm_spi_softc *)arg1;
 
 	BCM_SPI_LOCK(sc);
-	clk = BCM_SPI_READ(sc, SPI_CLK);
+	clk = sc->sc_clock_speed;
 	BCM_SPI_UNLOCK(sc);
-	clk &= 0xffff;
-	if (clk == 0)
-		clk = 65536;
-	clk = SPI_CORE_CLK / clk;
 
 	error = sysctl_handle_int(oidp, &clk, sizeof(clk), req);
 	if (error != 0 || req->newptr == NULL)
 		return (error);
 
-	clk = SPI_CORE_CLK / clk;
-	if (clk <= 1)
-		clk = 2;
-	else if (clk % 2)
-		clk--;
-	if (clk > 0xffff)
-		clk = 0;
 	BCM_SPI_LOCK(sc);
-	BCM_SPI_WRITE(sc, SPI_CLK, clk);
+	if (sc->sc_flags & BCM_SPI_BUSY) {
+		BCM_SPI_UNLOCK(sc);
+		return (EBUSY);
+	}
+	sc->sc_clock_speed = bcm_spi_set_clock_speed(sc, clk);
 	BCM_SPI_UNLOCK(sc);
 
 	return (0);
@@ -310,7 +322,8 @@
 	BCM_SPI_WRITE(sc, SPI_CS,
 	    SPI_CS_CLEAR_RXFIFO | SPI_CS_CLEAR_TXFIFO);
 	/* Set the SPI clock to 500Khz. */
-	BCM_SPI_WRITE(sc, SPI_CLK, SPI_CORE_CLK / 500000);
+	sc->sc_clock_speed = 500000;
+	BCM_SPI_WRITE(sc, SPI_CLK, SPI_CORE_CLK / sc->sc_clock_speed);
 
 #ifdef	BCM_SPI_DEBUG
 	bcm_spi_printr(dev);
@@ -418,6 +431,7 @@
 bcm_spi_transfer(device_t dev, device_t child, struct spi_command *cmd)
 {
 	struct bcm_spi_softc *sc;
+	const uint32_t clock_speed_hz = cmd->clock_speed_hz;
 	int cs, err;
 
 	sc = device_get_softc(dev);
@@ -450,6 +464,10 @@
 	    SPI_CS_CLEAR_RXFIFO | SPI_CS_CLEAR_TXFIFO,
 	    SPI_CS_CLEAR_RXFIFO | SPI_CS_CLEAR_TXFIFO);
 
+	/* Switch clock speed if necessary. */
+	if (clock_speed_hz != 0 && clock_speed_hz != sc->sc_clock_speed)
+		bcm_spi_set_clock_speed(sc, clock_speed_hz);
+
 	/* Save a pointer to the SPI command. */
 	sc->sc_cmd = cmd;
 	sc->sc_read = 0;
@@ -470,6 +488,10 @@
 	/* Make sure the SPI engine and interrupts are disabled. */
 	bcm_spi_modifyreg(sc, SPI_CS, SPI_CS_TA | SPI_CS_INTR | SPI_CS_INTD, 0);
 
+	/* Switch the clock speed back if necessary. */
+	if (clock_speed_hz != 0 && clock_speed_hz != sc->sc_clock_speed)
+		bcm_spi_set_clock_speed(sc, sc->sc_clock_speed);
+
 	/* Release the controller and wakeup the next thread waiting for it. */
 	sc->sc_flags = 0;
 	wakeup_one(dev);
@@ -487,6 +509,7 @@
 	return (err);
 }
 
+
 static phandle_t
 bcm_spi_get_node(device_t bus, device_t dev)
 {
Index: arm/broadcom/bcm2835/bcm2835_spivar.h
===================================================================
--- arm/broadcom/bcm2835/bcm2835_spivar.h
+++ arm/broadcom/bcm2835/bcm2835_spivar.h
@@ -54,6 +54,7 @@
 	uint32_t		sc_read;
 	uint32_t		sc_flags;
 	uint32_t		sc_written;
+	uint32_t		sc_clock_speed;
 	void			*sc_intrhand;
 };
 
Index: arm/conf/RPI2
===================================================================
--- arm/conf/RPI2
+++ arm/conf/RPI2
@@ -24,7 +24,7 @@
 include "../broadcom/bcm2835/std.rpi"
 include "../broadcom/bcm2835/std.bcm2836"
 
-options 	HZ=100
+options 	HZ=1000
 options 	SCHED_ULE		# ULE scheduler
 options 	SMP			# Enable multiple cores
 options 	PLATFORM
@@ -35,7 +35,7 @@
 #options 	VERBOSE_SYSINIT	# Enable verbose sysinit messages
 options 	KDB			# Enable kernel debugger support
 # For minimum debugger support (stable branch) use:
-#options 	KDB_TRACE		# Print a stack trace for a panic
+options 	KDB_TRACE		# Print a stack trace for a panic
 # For full debugger support use this instead:
 options 	DDB			# Enable the kernel debugger
 options 	INVARIANTS		# Enable calls of extra sanity checking
Index: arm/lpc/ssd1289.c
===================================================================
--- arm/lpc/ssd1289.c
+++ arm/lpc/ssd1289.c
@@ -157,7 +157,8 @@
 static __inline void
 ssd1289_spi_send(struct ssd1289_softc *sc, uint8_t *data, int len)
 {
-	struct spi_command cmd;
+	struct spi_command cmd = SPI_COMMAND_INITIALIZER;
+	uint8_t buffer[8];
 
 	cmd.tx_cmd = data;
 	cmd.tx_cmd_sz = len;
Index: conf/files
===================================================================
--- conf/files
+++ conf/files
@@ -2462,6 +2462,7 @@
 dev/spibus/ofw_spibus.c		optional fdt spibus
 dev/spibus/spibus.c		optional spibus				\
 	dependency	"spibus_if.h"
+dev/spibus/spigen.c		optional spibus
 dev/spibus/spibus_if.m		optional spibus
 dev/ste/if_ste.c		optional ste pci
 dev/stg/tmc18c30.c		optional stg
Index: dev/spibus/spi.h
===================================================================
--- dev/spibus/spi.h
+++ dev/spibus/spi.h
@@ -27,6 +27,7 @@
  */
 
 struct spi_command {
+	uint32_t clock_speed_hz;
 	void	*tx_cmd;
 	uint32_t tx_cmd_sz;
 	void	*rx_cmd;
@@ -37,4 +38,6 @@
 	uint32_t rx_data_sz;
 };
 
+#define	SPI_COMMAND_INITIALIZER	{ 0 }
+
 #define	SPI_CHIP_SELECT_HIGH	0x1	/* Chip select high (else low) */
Index: dev/spibus/spigen.c
===================================================================
--- dev/spibus/spigen.c
+++ dev/spibus/spigen.c
@@ -0,0 +1,441 @@
+/*-
+ * Copyright (c) 2015 Brian Fundakowski Feldman.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NOTE(review): the original extraction lost the targets of every #include
+ * below; they have been reconstructed to match the visible include counts
+ * and the symbols used by this file -- confirm against the committed file.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/spigenio.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+
+#include <dev/spibus/spi.h>
+
+#include "spibus_if.h"
+
+struct spigen_softc {
+	device_t sc_dev;
+	struct cdev *sc_cdev;
+	struct mtx sc_mtx;
+	uint32_t sc_clock_speed;
+	uint32_t sc_command_length_max; /* cannot change while mmapped */
+	uint32_t sc_data_length_max;    /* cannot change while mmapped */
+	vm_object_t sc_mmap_buffer;     /* command, then data */
+	vm_offset_t sc_mmap_kvaddr;
+	size_t sc_mmap_buffer_size;
+	int sc_mmap_busy;
+};
+
+static int
+spigen_probe(device_t dev)
+{
+	device_set_desc(dev, "SPI Generic IO");
+	return (0);
+}
+
+static int spigen_open(struct cdev *, int, int, struct thread *);
+static int spigen_ioctl(struct cdev *, u_long, caddr_t, int, struct thread *);
+static int spigen_close(struct cdev *, int, int, struct thread *);
+static d_mmap_single_t spigen_mmap_single;
+
+static struct cdevsw spigen_cdevsw = {
+	.d_version =     D_VERSION,
+	.d_name =        "spigen",
+	.d_open =        spigen_open,
+	.d_ioctl =       spigen_ioctl,
+	.d_mmap_single = spigen_mmap_single,
+	.d_close =       spigen_close
+};
+
+static int
+spigen_command_length_max_proc(SYSCTL_HANDLER_ARGS)
+{
+	struct spigen_softc *sc = (struct spigen_softc *)arg1;
+	uint32_t command_length_max;
+	int error;
+
+	mtx_lock(&sc->sc_mtx);
+	command_length_max = sc->sc_command_length_max;
+	mtx_unlock(&sc->sc_mtx);
+	error = sysctl_handle_int(oidp, &command_length_max,
+	    sizeof(command_length_max), req);
+	if (error == 0 && req->newptr != NULL) {
+		mtx_lock(&sc->sc_mtx);
+		if (sc->sc_mmap_buffer != NULL)
+			error = EBUSY;
+		else
+			sc->sc_command_length_max = command_length_max;
+		mtx_unlock(&sc->sc_mtx);
+	}
+	return (error);
+}
+
+static int
+spigen_data_length_max_proc(SYSCTL_HANDLER_ARGS)
+{
+	struct spigen_softc *sc = (struct spigen_softc *)arg1;
+	uint32_t data_length_max;
+	int error;
+
+	mtx_lock(&sc->sc_mtx);
+	data_length_max = sc->sc_data_length_max;
+	mtx_unlock(&sc->sc_mtx);
+	error = sysctl_handle_int(oidp, &data_length_max,
+	    sizeof(data_length_max), req);
+	if (error == 0 && req->newptr != NULL) {
+		mtx_lock(&sc->sc_mtx);
+		if (sc->sc_mmap_buffer != NULL)
+			error = EBUSY;
+		else
+			sc->sc_data_length_max = data_length_max;
+		mtx_unlock(&sc->sc_mtx);
+	}
+	return (error);
+}
+
+static void
+spigen_sysctl_init(struct spigen_softc *sc)
+{
+	struct sysctl_ctx_list *ctx;
+	struct sysctl_oid *tree_node;
+	struct sysctl_oid_list *tree;
+
+	/*
+	 * Add system sysctl tree/handlers.
+	 */
+	ctx = device_get_sysctl_ctx(sc->sc_dev);
+	tree_node = device_get_sysctl_tree(sc->sc_dev);
+	tree = SYSCTL_CHILDREN(tree_node);
+	SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "command_length_max",
+	    CTLFLAG_MPSAFE | CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc),
+	    spigen_command_length_max_proc, "IU", "SPI command header portion (octets)");
+	SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "data_length_max",
+	    CTLFLAG_MPSAFE | CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc),
+	    spigen_data_length_max_proc, "IU", "SPI data trailer portion (octets)");
+}
+
+static int
+spigen_attach(device_t dev)
+{
+	struct spigen_softc *sc;
+	const int unit = device_get_unit(dev);
+
+	sc = device_get_softc(dev);
+	sc->sc_dev = dev;
+	sc->sc_cdev = make_dev(&spigen_cdevsw, unit,
+	    UID_ROOT, GID_OPERATOR, 0660, "spigen%d", unit);
+	sc->sc_cdev->si_drv1 = dev;
+	sc->sc_command_length_max = PAGE_SIZE;
+	sc->sc_data_length_max = PAGE_SIZE;
+	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
+	spigen_sysctl_init(sc);
+
+	return (0);
+}
+
+static int
+spigen_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
+{
+
+	return (0);
+}
+
+static int
+spigen_transfer_clocked(struct cdev *cdev, struct spigen_transfer_clocked *stc)
+{
+	struct spi_command transfer = SPI_COMMAND_INITIALIZER;
+	device_t dev = cdev->si_drv1;
+	struct spigen_softc *sc = device_get_softc(dev);
+	int error = 0;
+
+	mtx_lock(&sc->sc_mtx);
+	if (stc->stc_command.iov_len == 0 || stc->stc_data.iov_len == 0)
+		error = EINVAL;
+	else if (stc->stc_command.iov_len > sc->sc_command_length_max ||
+	    stc->stc_data.iov_len > sc->sc_data_length_max)
+		error = ENOMEM;
+	else if (stc->stc_clock_speed != 0)
+		transfer.clock_speed_hz = stc->stc_clock_speed;
+	else
+		transfer.clock_speed_hz = sc->sc_clock_speed;
+	mtx_unlock(&sc->sc_mtx);
+	if (error)
+		return (error);
+
+#if 0
+	device_printf(dev, "cmd %p %u data %p %u\n", stc->stc_command.iov_base,
+	    stc->stc_command.iov_len, stc->stc_data.iov_base, stc->stc_data.iov_len);
+#endif
+	transfer.tx_cmd = transfer.rx_cmd = malloc(stc->stc_command.iov_len,
+	    M_DEVBUF, M_WAITOK);
+	if (transfer.tx_cmd == NULL)
+		return (ENOMEM);
+	transfer.tx_data = transfer.rx_data = malloc(stc->stc_data.iov_len,
+	    M_DEVBUF, M_WAITOK);
+	if (transfer.tx_data == NULL) {
+		free(transfer.tx_cmd, M_DEVBUF);
+		return (ENOMEM);
+	}
+
+	error = copyin(stc->stc_command.iov_base, transfer.tx_cmd,
+	    transfer.tx_cmd_sz = transfer.rx_cmd_sz = stc->stc_command.iov_len);
+	if (error == 0)
+		error = copyin(stc->stc_data.iov_base, transfer.tx_data,
+		    transfer.tx_data_sz = transfer.rx_data_sz =
+		    stc->stc_data.iov_len);
+	if (error == 0)
+		error = SPIBUS_TRANSFER(device_get_parent(dev), dev, &transfer);
+	if (error == 0) {
+		error = copyout(transfer.rx_cmd, stc->stc_command.iov_base,
+		    transfer.rx_cmd_sz);
+		if (error == 0)
+			error = copyout(transfer.rx_data,
+			    stc->stc_data.iov_base, transfer.rx_data_sz);
+	}
+
+	free(transfer.tx_cmd, M_DEVBUF);
+	free(transfer.tx_data, M_DEVBUF);
+	return (error);
+}
+
+static int
+spigen_transfer(struct cdev *cdev, struct spigen_transfer *st)
+{
+	struct spigen_transfer_clocked stc = {
+		0,
+		st->st_command,
+		st->st_data
+	};
+
+	return (spigen_transfer_clocked(cdev, &stc));
+}
+
+static int
+spigen_transfer_mmapped_clocked(struct cdev *cdev,
+    struct spigen_transfer_mmapped_clocked *stmc)
+{
+	struct spi_command transfer = SPI_COMMAND_INITIALIZER;
+	device_t dev = cdev->si_drv1;
+	struct spigen_softc *sc = device_get_softc(dev);
+	int error = 0;
+
+	mtx_lock(&sc->sc_mtx);
+	if (sc->sc_mmap_busy)
+		error = EBUSY;
+	else if (stmc->stmc_command_length > sc->sc_command_length_max ||
+	    stmc->stmc_data_length > sc->sc_data_length_max)
+		error = E2BIG;
+	else if (sc->sc_mmap_buffer == NULL)
+		error = EINVAL;
+	else if (sc->sc_mmap_buffer_size <
+	    stmc->stmc_command_length + stmc->stmc_data_length)
+		error = ENOMEM;
+	else if (stmc->stmc_clock_speed != 0)
+		transfer.clock_speed_hz = stmc->stmc_clock_speed;
+	else
+		transfer.clock_speed_hz = sc->sc_clock_speed;
+	if (error == 0)
+		sc->sc_mmap_busy = 1;
+	mtx_unlock(&sc->sc_mtx);
+	if (error)
+		return (error);
+
+	transfer.tx_cmd = transfer.rx_cmd = (void *)sc->sc_mmap_kvaddr;
+	transfer.tx_cmd_sz = transfer.rx_cmd_sz = stmc->stmc_command_length;
+	transfer.tx_data = transfer.rx_data =
+	    (void *)(sc->sc_mmap_kvaddr + stmc->stmc_command_length);
+	transfer.tx_data_sz = transfer.rx_data_sz = stmc->stmc_data_length;
+	error = SPIBUS_TRANSFER(device_get_parent(dev), dev, &transfer);
+
+	mtx_lock(&sc->sc_mtx);
+	KASSERT(sc->sc_mmap_busy, ("mmap no longer marked busy"));
+	sc->sc_mmap_busy = 0;
+	mtx_unlock(&sc->sc_mtx);
+	return (error);
+}
+
+static int
+spigen_transfer_mmapped(struct cdev *cdev, struct spigen_transfer_mmapped *stm)
+{
+	struct spigen_transfer_mmapped_clocked stmc = {
+		0,
+		stm->stm_command_length,
+		stm->stm_data_length
+	};
+
+	return (spigen_transfer_mmapped_clocked(cdev, &stmc));
+}
+
+static int
+spigen_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
+    struct thread *td)
+{
+	device_t dev = cdev->si_drv1;
+	struct spigen_softc *sc = device_get_softc(dev);
+	int error;
+
+	switch (cmd) {
+	case SPIGENIOC_TRANSFER:
+		error = spigen_transfer(cdev,
+		    (struct spigen_transfer *)data);
+		break;
+	case SPIGENIOC_TRANSFER_CLOCKED:
+		error = spigen_transfer_clocked(cdev,
+		    (struct spigen_transfer_clocked *)data);
+		break;
+	case SPIGENIOC_TRANSFER_MMAPPED:
+		error = spigen_transfer_mmapped(cdev,
+		    (struct spigen_transfer_mmapped *)data);
+		break;
+	case SPIGENIOC_TRANSFER_MMAPPED_CLOCKED:
+		error = spigen_transfer_mmapped_clocked(cdev,
+		    (struct spigen_transfer_mmapped_clocked *)data);
+		break;
+	case SPIGENIOC_GET_CLOCK_SPEED:
+		mtx_lock(&sc->sc_mtx);
+		*(uint32_t *)data = sc->sc_clock_speed;
+		mtx_unlock(&sc->sc_mtx);
+		error = 0;
+		break;
+	case SPIGENIOC_SET_CLOCK_SPEED:
+		mtx_lock(&sc->sc_mtx);
+		sc->sc_clock_speed = *(uint32_t *)data;
+		mtx_unlock(&sc->sc_mtx);
+		error = 0;
+		break;
+	default:
+		error = EOPNOTSUPP;
+	}
+	return (error);
+}
+
+static int
+spigen_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
+    vm_size_t size, struct vm_object **object, int nprot)
+{
+	device_t dev = cdev->si_drv1;
+	struct spigen_softc *sc = device_get_softc(dev);
+	vm_page_t *m;
+	size_t n, pages;
+
+	if (size == 0 ||
+	    (nprot & (PROT_EXEC | PROT_READ | PROT_WRITE))
+	    != (PROT_READ | PROT_WRITE))
+		return (EINVAL);
+	size = roundup2(size, PAGE_SIZE);
+	pages = size / PAGE_SIZE;
+
+	mtx_lock(&sc->sc_mtx);
+	if (sc->sc_mmap_buffer != NULL) {
+		mtx_unlock(&sc->sc_mtx);
+		return (EBUSY);
+	} else if (size > sc->sc_command_length_max + sc->sc_data_length_max) {
+		mtx_unlock(&sc->sc_mtx);
+		return (E2BIG);
+	}
+	sc->sc_mmap_buffer_size = size;
+	*offset = 0;
+	sc->sc_mmap_buffer = *object = vm_pager_allocate(OBJT_PHYS, 0, size,
+	    nprot, *offset, curthread->td_ucred);
+	m = malloc(sizeof(*m) * pages, M_TEMP, M_WAITOK);
+	VM_OBJECT_WLOCK(*object);
+	vm_object_reference_locked(*object); // kernel and userland both
+	for (n = 0; n < pages; n++) {
+		m[n] = vm_page_grab(*object, n,
+		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
+		m[n]->valid = VM_PAGE_BITS_ALL;
+	}
+	VM_OBJECT_WUNLOCK(*object);
+	sc->sc_mmap_kvaddr = kva_alloc(size);
+	pmap_qenter(sc->sc_mmap_kvaddr, m, pages);
+	free(m, M_TEMP);
+	mtx_unlock(&sc->sc_mtx);
+
+	if (*object == NULL)
+		return (EINVAL);
+	return (0);
+}
+
+static int
+spigen_close(struct cdev *cdev, int fflag, int devtype, struct thread *td)
+{
+	device_t dev = cdev->si_drv1;
+	struct spigen_softc *sc = device_get_softc(dev);
+
+	mtx_lock(&sc->sc_mtx);
+	if (sc->sc_mmap_buffer != NULL) {
+		pmap_qremove(sc->sc_mmap_kvaddr,
+		    sc->sc_mmap_buffer_size / PAGE_SIZE);
+		kva_free(sc->sc_mmap_kvaddr, sc->sc_mmap_buffer_size);
+		sc->sc_mmap_kvaddr = 0;
+		vm_object_deallocate(sc->sc_mmap_buffer);
+		sc->sc_mmap_buffer = NULL;
+		sc->sc_mmap_buffer_size = 0;
+	}
+	mtx_unlock(&sc->sc_mtx);
+	return (0);
+}
+
+static int
+spigen_detach(device_t dev)
+{
+
+	return (EIO);
+}
+
+static devclass_t spigen_devclass;
+
+static device_method_t spigen_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,		spigen_probe),
+	DEVMETHOD(device_attach,	spigen_attach),
+	DEVMETHOD(device_detach,	spigen_detach),
+
+	{ 0, 0 }
+};
+
+static driver_t spigen_driver = {
+	"spigen",
+	spigen_methods,
+	sizeof(struct spigen_softc),
+};
+
+DRIVER_MODULE(spigen, spibus, spigen_driver, spigen_devclass, 0, 0);
Index: sys/spigenio.h
===================================================================
--- sys/spigenio.h
+++ sys/spigenio.h
@@ -0,0 +1,67 @@
+/*-
+ * Copyright (c) 2000 Doug Rabson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_SPIGENIO_H_
+#define _SYS_SPIGENIO_H_
+
+#include <sys/_iovec.h>
+
+struct spigen_transfer {
+	struct iovec st_command; /* master to slave */
+	struct iovec st_data; /* slave to master and/or master to slave */
+};
+struct spigen_transfer_clocked {
+	uint32_t stc_clock_speed; /* SPI clock rate in Hz */
+	struct iovec stc_command; /* master to slave */
+	struct iovec stc_data; /* slave to master and/or master to slave */
+};
+
+/* Transfer lengths are in octets. */
+struct spigen_transfer_mmapped {
+	size_t stm_command_length; /* at offset 0 in mmap(2) area */
+	size_t stm_data_length; /* at offset stm_command_length */
+};
+struct spigen_transfer_mmapped_clocked {
+	uint32_t stmc_clock_speed; /* SPI clock rate in Hz */
+	size_t stmc_command_length; /* at offset 0 in mmap(2) area */
+	size_t stmc_data_length; /* at offset stmc_command_length */
+};
+
+#define SPIGENIOC_BASE  'S'
+#define SPIGENIOC_TRANSFER           _IOW(SPIGENIOC_BASE, 0, \
+						struct spigen_transfer)
+#define SPIGENIOC_TRANSFER_MMAPPED   _IOW(SPIGENIOC_BASE, 1, \
+						struct spigen_transfer_mmapped)
+#define SPIGENIOC_GET_CLOCK_SPEED    _IOR(SPIGENIOC_BASE, 2, uint32_t)
+#define SPIGENIOC_SET_CLOCK_SPEED    _IOW(SPIGENIOC_BASE, 3, uint32_t)
+#define SPIGENIOC_TRANSFER_CLOCKED   _IOW(SPIGENIOC_BASE, 4, \
+						struct spigen_transfer_clocked)
+#define SPIGENIOC_TRANSFER_MMAPPED_CLOCKED _IOW(SPIGENIOC_BASE, 5, \
+						struct spigen_transfer_mmapped_clocked)
+
+#endif /* !_SYS_SPIGENIO_H_ */