Index: projects/clang700-import/lib/csu/common/crtend.c =================================================================== --- projects/clang700-import/lib/csu/common/crtend.c (revision 340917) +++ projects/clang700-import/lib/csu/common/crtend.c (revision 340918) @@ -1,64 +1,67 @@ /*- * SPDX-License-Identifier: BSD-1-Clause * * Copyright 2018 Andrew Turner * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "crt.h" #ifdef HAVE_CTORS typedef void (*crt_func)(void); /* * On some architectures and toolchains we may need to call the .ctors. * These are called in the reverse order they are in the ELF file. */ static void __do_global_ctors_aux(void) __used; static crt_func __CTOR_END__[] __section(".ctors") __used = { (crt_func)0 }; static crt_func __DTOR_END__[] __section(".dtors") __used = { (crt_func)0 }; +static crt_func __JCR_LIST__[] __section(".jcr") __used = { + (crt_func)0 +}; static void __do_global_ctors_aux(void) { crt_func fn; int n; for (n = 1;; n++) { fn = __CTOR_END__[-n]; if (fn == (crt_func)0 || fn == (crt_func)-1) break; fn(); } } asm ( ".pushsection .init \n" "\t" INIT_CALL_SEQ(__do_global_ctors_aux) "\n" ".popsection \n" ); #endif Index: projects/clang700-import/stand/i386/pxeldr/pxeboot.8 =================================================================== --- projects/clang700-import/stand/i386/pxeldr/pxeboot.8 (revision 340917) +++ projects/clang700-import/stand/i386/pxeldr/pxeboot.8 (revision 340918) @@ -1,158 +1,160 @@ .\" Copyright (c) 1999 Doug White .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" .Dd May 27, 2017 .Dt PXEBOOT 8 .Os .Sh NAME .Nm pxeboot .Nd Preboot Execution Environment (PXE) bootloader .Sh DESCRIPTION The .Nm bootloader is a modified version of the system third-stage bootstrap .Xr loader 8 configured to run under Intel's Preboot Execution Environment (PXE) system. PXE is a form of smart boot ROM, built into Intel EtherExpress Pro/100 and 3Com 3c905c Ethernet cards, and Ethernet-equipped Intel motherboards. PXE supports DHCP configuration and provides low-level NIC access services. .Pp The DHCP client will set a DHCP user class named .Va FreeBSD to allow flexible configuration of the DHCP server. .Pp The .Nm bootloader retrieves the kernel, modules, and other files either via NFS over UDP or by TFTP, selectable through compile-time options. In combination with a memory file system image or NFS-mounted root file system, .Nm allows for easy, EEPROM-burner free construction of diskless machines. .Pp The .Nm binary is loaded just like any other boot file, by specifying it in the DHCP server's configuration file. -Below is a sample configuration for the ISC DHCP v2 server: +Below is a sample configuration for the ISC DHCP v3 server: .Bd -literal -offset indent option domain-name "example.com"; option routers 10.0.0.1; option subnet-mask 255.255.255.0; option broadcast-address 10.0.0.255; option domain-name-servers 10.0.0.1; server-name "DHCPserver"; server-identifier 10.0.0.1; +next-server 10.0.0.1; default-lease-time 120; max-lease-time 120; subnet 10.0.0.0 netmask 255.255.255.0 { filename "pxeboot"; range 10.0.0.10 10.0.0.254; if exists user-class and option user-class = "FreeBSD" { option root-path "tftp://10.0.0.1/FreeBSD"; } } .Ed +.Va next-server +is the IP address of the next server in the bootstrap process, i.e. +your TFTP server or NFS server. .Nm recognizes -.Va next-server -and .Va option root-path directives as the server and path to NFS mount for file requests, respectively, or the server to make TFTP requests to. Note that .Nm expects to fetch .Pa /boot/loader.rc from the specified server before loading any other files. .Pp Valid .Va option root-path Syntax is the following .Bl -tag -width ://ip/path indent .It /path path to the root filesystem on the NFS server .It ip:/path path to the root filesystem on the NFS server .Ar ip .It nfs:/path path to the root filesystem on the NFS server .It nfs://ip/path path to the root filesystem on the NFS server .Ar ip .It tftp:/path path to the root filesystem on the TFTP server .It tftp://ip/path path to the root filesystem on the TFTP server .Ar ip .El .Pp .Nm defaults to a conservative 1024 byte NFS data packet size. This may be changed by setting the .Va nfs.read_size variable in .Pa /boot/loader.conf . Valid values range from 1024 to 16384 bytes. .Pp In all other respects, .Nm acts just like .Xr loader 8 . .Pp As PXE is still in its infancy, some firmware versions may not work properly. 
The .Nm bootloader has been extensively tested on version 0.99 of Intel firmware; pre-release versions of the newer 2.0 firmware are known to have problems. Check with the device's manufacturer for their latest stable release. .Pp For further information on Intel's PXE specifications and Wired for Management (WfM) systems, see .Li http://www.intel.com/design/archives/wfm/ . .Sh SEE ALSO .Xr loader 8 .Sh HISTORY The .Nm bootloader first appeared in .Fx 4.1 . .Sh AUTHORS .An -nosplit The .Nm bootloader was written by .An John Baldwin Aq jhb@FreeBSD.org and .An Paul Saab Aq ps@FreeBSD.org . This manual page was written by .An Doug White Aq dwhite@FreeBSD.org .
Index: projects/clang700-import/sys/compat/linux/linux_event.c =================================================================== --- projects/clang700-import/sys/compat/linux/linux_event.c (revision 340917) +++ projects/clang700-import/sys/compat/linux/linux_event.c (revision 340918) @@ -1,1328 +1,1328 @@ /*- * Copyright (c) 2007 Roman Divacky * Copyright (c) 2014 Dmitry Chagin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef COMPAT_LINUX32 #include #include #else #include #include #endif #include #include #include #include #include /* * epoll defines 'struct epoll_event' with the field 'data' as 64 bits * on all architectures. But on 32 bit architectures BSD 'struct kevent' only * has a 32 bit opaque pointer as its 'udata' field. So we can't pass epoll supplied * data verbatim. Therefore we allocate a 64-bit memory block to pass * user supplied data for every file descriptor.
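 *
 * An illustrative sketch (assumed layouts, added for clarity, not code
 * from this file): on a 32-bit architecture the two sides look like
 *
 *	Linux:	struct epoll_event { uint32_t events; uint64_t data; }
 *	BSD:	struct kevent { ...; void *udata; }	- udata is 4 bytes here
 *
 * so the 64-bit 'data' is parked in the per-process udata[] vector
 * allocated below, indexed by file descriptor, and fetched back out of it
 * in epoll_kev_copyout().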
*/ typedef uint64_t epoll_udata_t; struct epoll_emuldata { uint32_t fdc; /* epoll udata max index */ epoll_udata_t udata[1]; /* epoll user data vector */ }; #define EPOLL_DEF_SZ 16 #define EPOLL_SIZE(fdn) \ (sizeof(struct epoll_emuldata)+(fdn) * sizeof(epoll_udata_t)) struct epoll_event { uint32_t events; epoll_udata_t data; } #if defined(__amd64__) __attribute__((packed)) #endif ; #define LINUX_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event)) static void epoll_fd_install(struct thread *td, int fd, epoll_udata_t udata); static int epoll_to_kevent(struct thread *td, struct file *epfp, int fd, struct epoll_event *l_event, int *kev_flags, struct kevent *kevent, int *nkevents); static void kevent_to_epoll(struct kevent *kevent, struct epoll_event *l_event); static int epoll_kev_copyout(void *arg, struct kevent *kevp, int count); static int epoll_kev_copyin(void *arg, struct kevent *kevp, int count); static int epoll_delete_event(struct thread *td, struct file *epfp, int fd, int filter); static int epoll_delete_all_events(struct thread *td, struct file *epfp, int fd); struct epoll_copyin_args { struct kevent *changelist; }; struct epoll_copyout_args { struct epoll_event *leventlist; struct proc *p; uint32_t count; int error; }; /* eventfd */ typedef uint64_t eventfd_t; static fo_rdwr_t eventfd_read; static fo_rdwr_t eventfd_write; static fo_ioctl_t eventfd_ioctl; static fo_poll_t eventfd_poll; static fo_kqfilter_t eventfd_kqfilter; static fo_stat_t eventfd_stat; static fo_close_t eventfd_close; static fo_fill_kinfo_t eventfd_fill_kinfo; static struct fileops eventfdops = { .fo_read = eventfd_read, .fo_write = eventfd_write, .fo_truncate = invfo_truncate, .fo_ioctl = eventfd_ioctl, .fo_poll = eventfd_poll, .fo_kqfilter = eventfd_kqfilter, .fo_stat = eventfd_stat, .fo_close = eventfd_close, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, .fo_fill_kinfo = eventfd_fill_kinfo, .fo_flags = DFLAG_PASSABLE }; static void filt_eventfddetach(struct knote *kn); static int filt_eventfdread(struct knote *kn, long hint); static int filt_eventfdwrite(struct knote *kn, long hint); static struct filterops eventfd_rfiltops = { .f_isfd = 1, .f_detach = filt_eventfddetach, .f_event = filt_eventfdread }; static struct filterops eventfd_wfiltops = { .f_isfd = 1, .f_detach = filt_eventfddetach, .f_event = filt_eventfdwrite }; /* timerfd */ typedef uint64_t timerfd_t; static fo_rdwr_t timerfd_read; static fo_poll_t timerfd_poll; static fo_kqfilter_t timerfd_kqfilter; static fo_stat_t timerfd_stat; static fo_close_t timerfd_close; static fo_fill_kinfo_t timerfd_fill_kinfo; static struct fileops timerfdops = { .fo_read = timerfd_read, .fo_write = invfo_rdwr, .fo_truncate = invfo_truncate, .fo_ioctl = eventfd_ioctl, .fo_poll = timerfd_poll, .fo_kqfilter = timerfd_kqfilter, .fo_stat = timerfd_stat, .fo_close = timerfd_close, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, .fo_fill_kinfo = timerfd_fill_kinfo, .fo_flags = DFLAG_PASSABLE }; static void filt_timerfddetach(struct knote *kn); static int filt_timerfdread(struct knote *kn, long hint); static struct filterops timerfd_rfiltops = { .f_isfd = 1, .f_detach = filt_timerfddetach, .f_event = filt_timerfdread }; struct eventfd { eventfd_t efd_count; uint32_t efd_flags; struct selinfo efd_sel; struct mtx efd_lock; }; struct timerfd { clockid_t tfd_clockid; struct itimerspec tfd_time; struct callout tfd_callout; timerfd_t tfd_count; bool tfd_canceled; struct selinfo tfd_sel; struct mtx tfd_lock; }; static 
int eventfd_create(struct thread *td, uint32_t initval, int flags); static void linux_timerfd_expire(void *); static void linux_timerfd_curval(struct timerfd *, struct itimerspec *); static void epoll_fd_install(struct thread *td, int fd, epoll_udata_t udata) { struct linux_pemuldata *pem; struct epoll_emuldata *emd; struct proc *p; p = td->td_proc; pem = pem_find(p); KASSERT(pem != NULL, ("epoll proc emuldata not found.\n")); LINUX_PEM_XLOCK(pem); if (pem->epoll == NULL) { emd = malloc(EPOLL_SIZE(fd), M_EPOLL, M_WAITOK); emd->fdc = fd; pem->epoll = emd; } else { emd = pem->epoll; if (fd > emd->fdc) { emd = realloc(emd, EPOLL_SIZE(fd), M_EPOLL, M_WAITOK); emd->fdc = fd; pem->epoll = emd; } } emd->udata[fd] = udata; LINUX_PEM_XUNLOCK(pem); } static int epoll_create_common(struct thread *td, int flags) { int error; error = kern_kqueue(td, flags, NULL); if (error != 0) return (error); epoll_fd_install(td, EPOLL_DEF_SZ, 0); return (0); } #ifdef LINUX_LEGACY_SYSCALLS int linux_epoll_create(struct thread *td, struct linux_epoll_create_args *args) { /* * args->size is unused. Linux just tests it * and then forgets it as well. */ if (args->size <= 0) return (EINVAL); return (epoll_create_common(td, 0)); } #endif int linux_epoll_create1(struct thread *td, struct linux_epoll_create1_args *args) { int flags; if ((args->flags & ~(LINUX_O_CLOEXEC)) != 0) return (EINVAL); flags = 0; if ((args->flags & LINUX_O_CLOEXEC) != 0) flags |= O_CLOEXEC; return (epoll_create_common(td, flags)); } /* Structure converting function from epoll to kevent. */ static int epoll_to_kevent(struct thread *td, struct file *epfp, int fd, struct epoll_event *l_event, int *kev_flags, struct kevent *kevent, int *nkevents) { uint32_t levents = l_event->events; struct linux_pemuldata *pem; struct proc *p; /* flags related to how event is registered */ if ((levents & LINUX_EPOLLONESHOT) != 0) *kev_flags |= EV_ONESHOT; if ((levents & LINUX_EPOLLET) != 0) *kev_flags |= EV_CLEAR; if ((levents & LINUX_EPOLLERR) != 0) *kev_flags |= EV_ERROR; if ((levents & LINUX_EPOLLRDHUP) != 0) *kev_flags |= EV_EOF; /* flags related to what event is registered */ if ((levents & LINUX_EPOLL_EVRD) != 0) { EV_SET(kevent++, fd, EVFILT_READ, *kev_flags, 0, 0, 0); ++(*nkevents); } if ((levents & LINUX_EPOLL_EVWR) != 0) { EV_SET(kevent++, fd, EVFILT_WRITE, *kev_flags, 0, 0, 0); ++(*nkevents); } if ((levents & ~(LINUX_EPOLL_EVSUP)) != 0) { p = td->td_proc; pem = pem_find(p); KASSERT(pem != NULL, ("epoll proc emuldata not found.\n")); KASSERT(pem->epoll != NULL, ("epoll proc epolldata not found.\n")); LINUX_PEM_XLOCK(pem); if ((pem->flags & LINUX_XUNSUP_EPOLL) == 0) { pem->flags |= LINUX_XUNSUP_EPOLL; LINUX_PEM_XUNLOCK(pem); linux_msg(td, "epoll_ctl unsupported flags: 0x%x\n", levents); } else LINUX_PEM_XUNLOCK(pem); return (EINVAL); } return (0); } /* * Structure converting function from kevent to epoll. In a case * this is called on error in registration we store the error in * event->data and pick it up later in linux_epoll_ctl(). */ static void kevent_to_epoll(struct kevent *kevent, struct epoll_event *l_event) { if ((kevent->flags & EV_ERROR) != 0) { l_event->events = LINUX_EPOLLERR; return; } /* XXX EPOLLPRI, EPOLLHUP */ switch (kevent->filter) { case EVFILT_READ: l_event->events = LINUX_EPOLLIN; if ((kevent->flags & EV_EOF) != 0) l_event->events |= LINUX_EPOLLRDHUP; break; case EVFILT_WRITE: l_event->events = LINUX_EPOLLOUT; break; } } /* * Copyout callback used by kevent. 
This converts kevent * events to epoll events and copies them back to the * userspace. This is also called on error when registering * the filter. */ static int epoll_kev_copyout(void *arg, struct kevent *kevp, int count) { struct epoll_copyout_args *args; struct linux_pemuldata *pem; struct epoll_emuldata *emd; struct epoll_event *eep; int error, fd, i; args = (struct epoll_copyout_args*) arg; eep = malloc(sizeof(*eep) * count, M_EPOLL, M_WAITOK | M_ZERO); pem = pem_find(args->p); KASSERT(pem != NULL, ("epoll proc emuldata not found.\n")); LINUX_PEM_SLOCK(pem); emd = pem->epoll; KASSERT(emd != NULL, ("epoll proc epolldata not found.\n")); for (i = 0; i < count; i++) { kevent_to_epoll(&kevp[i], &eep[i]); fd = kevp[i].ident; KASSERT(fd <= emd->fdc, ("epoll user data vector" " is too small.\n")); eep[i].data = emd->udata[fd]; } LINUX_PEM_SUNLOCK(pem); error = copyout(eep, args->leventlist, count * sizeof(*eep)); if (error == 0) { args->leventlist += count; args->count += count; } else if (args->error == 0) args->error = error; free(eep, M_EPOLL); return (error); }
/* * Copyin callback used by kevent. This copies already * converted filters from kernel memory to the kevent * internal kernel memory. Hence the memcpy instead of * copyin. */ static int epoll_kev_copyin(void *arg, struct kevent *kevp, int count) { struct epoll_copyin_args *args; args = (struct epoll_copyin_args*) arg; memcpy(kevp, args->changelist, count * sizeof(*kevp)); args->changelist += count; return (0); }
/* * Load the epoll filter, convert it to a kevent filter * and load it into the kevent subsystem. */ int linux_epoll_ctl(struct thread *td, struct linux_epoll_ctl_args *args) { struct file *epfp, *fp; struct epoll_copyin_args ciargs; struct kevent kev[2]; struct kevent_copyops k_ops = { &ciargs, NULL, epoll_kev_copyin}; struct epoll_event le; cap_rights_t rights; int kev_flags; int nchanges = 0; int error; if (args->op != LINUX_EPOLL_CTL_DEL) { error = copyin(args->event, &le, sizeof(le)); if (error != 0) return (error); } error = fget(td, args->epfd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &epfp); if (error != 0) return (error); if (epfp->f_type != DTYPE_KQUEUE) { error = EINVAL; goto leave1; } /* Protect user data vector from incorrectly supplied fd. */ error = fget(td, args->fd, cap_rights_init(&rights, CAP_POLL_EVENT), &fp); if (error != 0) goto leave1; /* Linux disallows an epoll instance from spying on itself */ if (epfp == fp) { error = EINVAL; goto leave0; } ciargs.changelist = kev; if (args->op != LINUX_EPOLL_CTL_DEL) { kev_flags = EV_ADD | EV_ENABLE; error = epoll_to_kevent(td, epfp, args->fd, &le, &kev_flags, kev, &nchanges); if (error != 0) goto leave0; } switch (args->op) { case LINUX_EPOLL_CTL_MOD: error = epoll_delete_all_events(td, epfp, args->fd); if (error != 0) goto leave0; break; case LINUX_EPOLL_CTL_ADD: /* * kqueue_register() returns ENOENT if the event does not exist * and the EV_ADD flag is not set.
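 * Clearing EV_ADD below therefore turns the kqfd_register() call into a
 * pure existence probe: any result other than ENOENT means the fd is
 * already registered, and EPOLL_CTL_ADD must then fail with EEXIST, as
 * it does on Linux.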
*/ kev[0].flags &= ~EV_ADD; - error = kqfd_register(args->epfd, &kev[0], td, 1); + error = kqfd_register(args->epfd, &kev[0], td, M_WAITOK); if (error != ENOENT) { error = EEXIST; goto leave0; } error = 0; kev[0].flags |= EV_ADD; break; case LINUX_EPOLL_CTL_DEL: /* CTL_DEL means unregister this fd with this epoll */ error = epoll_delete_all_events(td, epfp, args->fd); goto leave0; default: error = EINVAL; goto leave0; } epoll_fd_install(td, args->fd, le.data); error = kern_kevent_fp(td, epfp, nchanges, 0, &k_ops, NULL); leave0: fdrop(fp, td); leave1: fdrop(epfp, td); return (error); } /* * Wait for a filter to be triggered on the epoll file descriptor. */ static int linux_epoll_wait_common(struct thread *td, int epfd, struct epoll_event *events, int maxevents, int timeout, sigset_t *uset) { struct epoll_copyout_args coargs; struct kevent_copyops k_ops = { &coargs, epoll_kev_copyout, NULL}; struct timespec ts, *tsp; cap_rights_t rights; struct file *epfp; sigset_t omask; int error; if (maxevents <= 0 || maxevents > LINUX_MAX_EVENTS) return (EINVAL); error = fget(td, epfd, cap_rights_init(&rights, CAP_KQUEUE_EVENT), &epfp); if (error != 0) return (error); if (epfp->f_type != DTYPE_KQUEUE) { error = EINVAL; goto leave1; } if (uset != NULL) { error = kern_sigprocmask(td, SIG_SETMASK, uset, &omask, 0); if (error != 0) goto leave1; td->td_pflags |= TDP_OLDMASK; /* * Make sure that ast() is called on return to * usermode and TDP_OLDMASK is cleared, restoring old * sigmask. */ thread_lock(td); td->td_flags |= TDF_ASTPENDING; thread_unlock(td); } coargs.leventlist = events; coargs.p = td->td_proc; coargs.count = 0; coargs.error = 0; if (timeout != -1) { if (timeout < 0) { error = EINVAL; goto leave0; } /* Convert from milliseconds to timespec. */ ts.tv_sec = timeout / 1000; ts.tv_nsec = (timeout % 1000) * 1000000; tsp = &ts; } else { tsp = NULL; } error = kern_kevent_fp(td, epfp, 0, maxevents, &k_ops, tsp); if (error == 0 && coargs.error != 0) error = coargs.error; /* * kern_kevent might return ENOMEM which is not expected from epoll_wait. * Maybe we should translate that but I don't think it matters at all. 
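 * (For reference, Linux epoll_wait(2) documents only EBADF, EFAULT,
 * EINTR and EINVAL as possible errors.)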
*/ if (error == 0) td->td_retval[0] = coargs.count; leave0: if (uset != NULL) error = kern_sigprocmask(td, SIG_SETMASK, &omask, NULL, 0); leave1: fdrop(epfp, td); return (error); } #ifdef LINUX_LEGACY_SYSCALLS int linux_epoll_wait(struct thread *td, struct linux_epoll_wait_args *args) { return (linux_epoll_wait_common(td, args->epfd, args->events, args->maxevents, args->timeout, NULL)); } #endif int linux_epoll_pwait(struct thread *td, struct linux_epoll_pwait_args *args) { sigset_t mask, *pmask; l_sigset_t lmask; int error; if (args->mask != NULL) { if (args->sigsetsize != sizeof(l_sigset_t)) return (EINVAL); error = copyin(args->mask, &lmask, sizeof(l_sigset_t)); if (error != 0) return (error); linux_to_bsd_sigset(&lmask, &mask); pmask = &mask; } else pmask = NULL; return (linux_epoll_wait_common(td, args->epfd, args->events, args->maxevents, args->timeout, pmask)); } static int epoll_delete_event(struct thread *td, struct file *epfp, int fd, int filter) { struct epoll_copyin_args ciargs; struct kevent kev; struct kevent_copyops k_ops = { &ciargs, NULL, epoll_kev_copyin}; ciargs.changelist = &kev; EV_SET(&kev, fd, filter, EV_DELETE | EV_DISABLE, 0, 0, 0); return (kern_kevent_fp(td, epfp, 1, 0, &k_ops, NULL)); } static int epoll_delete_all_events(struct thread *td, struct file *epfp, int fd) { int error1, error2; error1 = epoll_delete_event(td, epfp, fd, EVFILT_READ); error2 = epoll_delete_event(td, epfp, fd, EVFILT_WRITE); /* return 0 if at least one result positive */ return (error1 == 0 ? 0 : error2); } static int eventfd_create(struct thread *td, uint32_t initval, int flags) { struct filedesc *fdp; struct eventfd *efd; struct file *fp; int fflags, fd, error; fflags = 0; if ((flags & LINUX_O_CLOEXEC) != 0) fflags |= O_CLOEXEC; fdp = td->td_proc->p_fd; error = falloc(td, &fp, &fd, fflags); if (error != 0) return (error); efd = malloc(sizeof(*efd), M_EPOLL, M_WAITOK | M_ZERO); efd->efd_flags = flags; efd->efd_count = initval; mtx_init(&efd->efd_lock, "eventfd", NULL, MTX_DEF); knlist_init_mtx(&efd->efd_sel.si_note, &efd->efd_lock); fflags = FREAD | FWRITE; if ((flags & LINUX_O_NONBLOCK) != 0) fflags |= FNONBLOCK; finit(fp, fflags, DTYPE_LINUXEFD, efd, &eventfdops); fdrop(fp, td); td->td_retval[0] = fd; return (error); } #ifdef LINUX_LEGACY_SYSCALLS int linux_eventfd(struct thread *td, struct linux_eventfd_args *args) { return (eventfd_create(td, args->initval, 0)); } #endif int linux_eventfd2(struct thread *td, struct linux_eventfd2_args *args) { if ((args->flags & ~(LINUX_O_CLOEXEC|LINUX_O_NONBLOCK|LINUX_EFD_SEMAPHORE)) != 0) return (EINVAL); return (eventfd_create(td, args->initval, args->flags)); } static int eventfd_close(struct file *fp, struct thread *td) { struct eventfd *efd; efd = fp->f_data; if (fp->f_type != DTYPE_LINUXEFD || efd == NULL) return (EINVAL); seldrain(&efd->efd_sel); knlist_destroy(&efd->efd_sel.si_note); fp->f_ops = &badfileops; mtx_destroy(&efd->efd_lock); free(efd, M_EPOLL); return (0); } static int eventfd_read(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct eventfd *efd; eventfd_t count; int error; efd = fp->f_data; if (fp->f_type != DTYPE_LINUXEFD || efd == NULL) return (EINVAL); if (uio->uio_resid < sizeof(eventfd_t)) return (EINVAL); error = 0; mtx_lock(&efd->efd_lock); retry: if (efd->efd_count == 0) { if ((fp->f_flag & FNONBLOCK) != 0) { mtx_unlock(&efd->efd_lock); return (EAGAIN); } error = mtx_sleep(&efd->efd_count, &efd->efd_lock, PCATCH, "lefdrd", 0); if (error == 0) goto retry; } if (error == 0) { 
if ((efd->efd_flags & LINUX_EFD_SEMAPHORE) != 0) { count = 1; --efd->efd_count; } else { count = efd->efd_count; efd->efd_count = 0; } KNOTE_LOCKED(&efd->efd_sel.si_note, 0); selwakeup(&efd->efd_sel); wakeup(&efd->efd_count); mtx_unlock(&efd->efd_lock); error = uiomove(&count, sizeof(eventfd_t), uio); } else mtx_unlock(&efd->efd_lock); return (error); }
static int eventfd_write(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct eventfd *efd; eventfd_t count; int error; efd = fp->f_data; if (fp->f_type != DTYPE_LINUXEFD || efd == NULL) return (EINVAL); if (uio->uio_resid < sizeof(eventfd_t)) return (EINVAL); error = uiomove(&count, sizeof(eventfd_t), uio); if (error != 0) return (error); if (count == UINT64_MAX) return (EINVAL); mtx_lock(&efd->efd_lock); retry: if (UINT64_MAX - efd->efd_count <= count) { if ((fp->f_flag & FNONBLOCK) != 0) { mtx_unlock(&efd->efd_lock); /* Do not return the number of bytes written */ uio->uio_resid += sizeof(eventfd_t); return (EAGAIN); } error = mtx_sleep(&efd->efd_count, &efd->efd_lock, PCATCH, "lefdwr", 0); if (error == 0) goto retry; } if (error == 0) { efd->efd_count += count; KNOTE_LOCKED(&efd->efd_sel.si_note, 0); selwakeup(&efd->efd_sel); wakeup(&efd->efd_count); } mtx_unlock(&efd->efd_lock); return (error); }
static int eventfd_poll(struct file *fp, int events, struct ucred *active_cred, struct thread *td) { struct eventfd *efd; int revents = 0; efd = fp->f_data; if (fp->f_type != DTYPE_LINUXEFD || efd == NULL) return (POLLERR); mtx_lock(&efd->efd_lock); if ((events & (POLLIN|POLLRDNORM)) && efd->efd_count > 0) revents |= events & (POLLIN|POLLRDNORM); if ((events & (POLLOUT|POLLWRNORM)) && UINT64_MAX - 1 > efd->efd_count) revents |= events & (POLLOUT|POLLWRNORM); if (revents == 0) selrecord(td, &efd->efd_sel); mtx_unlock(&efd->efd_lock); return (revents); }
/*ARGSUSED*/ static int eventfd_kqfilter(struct file *fp, struct knote *kn) { struct eventfd *efd; efd = fp->f_data; if (fp->f_type != DTYPE_LINUXEFD || efd == NULL) return (EINVAL); mtx_lock(&efd->efd_lock); switch (kn->kn_filter) { case EVFILT_READ: kn->kn_fop = &eventfd_rfiltops; break; case EVFILT_WRITE: kn->kn_fop = &eventfd_wfiltops; break; default: mtx_unlock(&efd->efd_lock); return (EINVAL); } kn->kn_hook = efd; knlist_add(&efd->efd_sel.si_note, kn, 1); mtx_unlock(&efd->efd_lock); return (0); }
static void filt_eventfddetach(struct knote *kn) { struct eventfd *efd = kn->kn_hook; mtx_lock(&efd->efd_lock); knlist_remove(&efd->efd_sel.si_note, kn, 1); mtx_unlock(&efd->efd_lock); }
/*ARGSUSED*/ static int filt_eventfdread(struct knote *kn, long hint) { struct eventfd *efd = kn->kn_hook; int ret; mtx_assert(&efd->efd_lock, MA_OWNED); ret = (efd->efd_count > 0); return (ret); }
/*ARGSUSED*/ static int filt_eventfdwrite(struct knote *kn, long hint) { struct eventfd *efd = kn->kn_hook; int ret; mtx_assert(&efd->efd_lock, MA_OWNED); ret = (UINT64_MAX - 1 > efd->efd_count); return (ret); }
/*ARGSUSED*/ static int eventfd_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred, struct thread *td) { if (fp->f_data == NULL || (fp->f_type != DTYPE_LINUXEFD && fp->f_type != DTYPE_LINUXTFD)) return (EINVAL); switch (cmd) { case FIONBIO: if ((*(int *)data)) atomic_set_int(&fp->f_flag, FNONBLOCK); else atomic_clear_int(&fp->f_flag, FNONBLOCK); /* FALLTHROUGH */ case FIOASYNC: return (0); default: return (ENXIO); } }
/*ARGSUSED*/ static int eventfd_stat(struct file *fp, struct stat *st, struct ucred *active_cred, struct thread *td) { return (ENXIO); }
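#if 0
/*
 * Illustrative only (a userland sketch, not part of this kernel file):
 * the counter semantics implemented by eventfd_read()/eventfd_write()
 * above, as seen from a Linux program. With EFD_SEMAPHORE each read()
 * returns 1 and decrements the counter; without it, read() returns the
 * whole counter and resets it to zero.
 */
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

static void
eventfd_semaphore_demo(void)
{
	uint64_t v;
	int fd;

	fd = eventfd(3, EFD_SEMAPHORE);	/* counter starts at 3 */
	read(fd, &v, sizeof(v));	/* v == 1, counter now 2 */
	v = 5;
	write(fd, &v, sizeof(v));	/* counter now 7 */
	close(fd);
}
#endif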
/*ARGSUSED*/ static int eventfd_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { kif->kf_type = KF_TYPE_UNKNOWN; return (0); } int linux_timerfd_create(struct thread *td, struct linux_timerfd_create_args *args) { struct filedesc *fdp; struct timerfd *tfd; struct file *fp; clockid_t clockid; int fflags, fd, error; if ((args->flags & ~LINUX_TFD_CREATE_FLAGS) != 0) return (EINVAL); error = linux_to_native_clockid(&clockid, args->clockid); if (error != 0) return (error); if (clockid != CLOCK_REALTIME && clockid != CLOCK_MONOTONIC) return (EINVAL); fflags = 0; if ((args->flags & LINUX_TFD_CLOEXEC) != 0) fflags |= O_CLOEXEC; fdp = td->td_proc->p_fd; error = falloc(td, &fp, &fd, fflags); if (error != 0) return (error); tfd = malloc(sizeof(*tfd), M_EPOLL, M_WAITOK | M_ZERO); tfd->tfd_clockid = clockid; mtx_init(&tfd->tfd_lock, "timerfd", NULL, MTX_DEF); callout_init_mtx(&tfd->tfd_callout, &tfd->tfd_lock, 0); knlist_init_mtx(&tfd->tfd_sel.si_note, &tfd->tfd_lock); fflags = FREAD; if ((args->flags & LINUX_O_NONBLOCK) != 0) fflags |= FNONBLOCK; finit(fp, fflags, DTYPE_LINUXTFD, tfd, &timerfdops); fdrop(fp, td); td->td_retval[0] = fd; return (error); } static int timerfd_close(struct file *fp, struct thread *td) { struct timerfd *tfd; tfd = fp->f_data; if (fp->f_type != DTYPE_LINUXTFD || tfd == NULL) return (EINVAL); timespecclear(&tfd->tfd_time.it_value); timespecclear(&tfd->tfd_time.it_interval); mtx_lock(&tfd->tfd_lock); callout_drain(&tfd->tfd_callout); mtx_unlock(&tfd->tfd_lock); seldrain(&tfd->tfd_sel); knlist_destroy(&tfd->tfd_sel.si_note); fp->f_ops = &badfileops; mtx_destroy(&tfd->tfd_lock); free(tfd, M_EPOLL); return (0); } static int timerfd_read(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct timerfd *tfd; timerfd_t count; int error; tfd = fp->f_data; if (fp->f_type != DTYPE_LINUXTFD || tfd == NULL) return (EINVAL); if (uio->uio_resid < sizeof(timerfd_t)) return (EINVAL); error = 0; mtx_lock(&tfd->tfd_lock); retry: if (tfd->tfd_canceled) { tfd->tfd_count = 0; mtx_unlock(&tfd->tfd_lock); return (ECANCELED); } if (tfd->tfd_count == 0) { if ((fp->f_flag & FNONBLOCK) != 0) { mtx_unlock(&tfd->tfd_lock); return (EAGAIN); } error = mtx_sleep(&tfd->tfd_count, &tfd->tfd_lock, PCATCH, "ltfdrd", 0); if (error == 0) goto retry; } if (error == 0) { count = tfd->tfd_count; tfd->tfd_count = 0; mtx_unlock(&tfd->tfd_lock); error = uiomove(&count, sizeof(timerfd_t), uio); } else mtx_unlock(&tfd->tfd_lock); return (error); } static int timerfd_poll(struct file *fp, int events, struct ucred *active_cred, struct thread *td) { struct timerfd *tfd; int revents = 0; tfd = fp->f_data; if (fp->f_type != DTYPE_LINUXTFD || tfd == NULL) return (POLLERR); mtx_lock(&tfd->tfd_lock); if ((events & (POLLIN|POLLRDNORM)) && tfd->tfd_count > 0) revents |= events & (POLLIN|POLLRDNORM); if (revents == 0) selrecord(td, &tfd->tfd_sel); mtx_unlock(&tfd->tfd_lock); return (revents); } /*ARGSUSED*/ static int timerfd_kqfilter(struct file *fp, struct knote *kn) { struct timerfd *tfd; tfd = fp->f_data; if (fp->f_type != DTYPE_LINUXTFD || tfd == NULL) return (EINVAL); if (kn->kn_filter == EVFILT_READ) kn->kn_fop = &timerfd_rfiltops; else return (EINVAL); kn->kn_hook = tfd; knlist_add(&tfd->tfd_sel.si_note, kn, 0); return (0); } static void filt_timerfddetach(struct knote *kn) { struct timerfd *tfd = kn->kn_hook; mtx_lock(&tfd->tfd_lock); knlist_remove(&tfd->tfd_sel.si_note, kn, 1); mtx_unlock(&tfd->tfd_lock); } /*ARGSUSED*/ static int filt_timerfdread(struct 
knote *kn, long hint) { struct timerfd *tfd = kn->kn_hook; return (tfd->tfd_count > 0); } /*ARGSUSED*/ static int timerfd_stat(struct file *fp, struct stat *st, struct ucred *active_cred, struct thread *td) { return (ENXIO); } /*ARGSUSED*/ static int timerfd_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { kif->kf_type = KF_TYPE_UNKNOWN; return (0); } static void linux_timerfd_clocktime(struct timerfd *tfd, struct timespec *ts) { if (tfd->tfd_clockid == CLOCK_REALTIME) getnanotime(ts); else /* CLOCK_MONOTONIC */ getnanouptime(ts); } static void linux_timerfd_curval(struct timerfd *tfd, struct itimerspec *ots) { struct timespec cts; linux_timerfd_clocktime(tfd, &cts); *ots = tfd->tfd_time; if (ots->it_value.tv_sec != 0 || ots->it_value.tv_nsec != 0) { timespecsub(&ots->it_value, &cts, &ots->it_value); if (ots->it_value.tv_sec < 0 || (ots->it_value.tv_sec == 0 && ots->it_value.tv_nsec == 0)) { ots->it_value.tv_sec = 0; ots->it_value.tv_nsec = 1; } } } int linux_timerfd_gettime(struct thread *td, struct linux_timerfd_gettime_args *args) { struct l_itimerspec lots; struct itimerspec ots; struct timerfd *tfd; struct file *fp; int error; error = fget(td, args->fd, &cap_read_rights, &fp); if (error != 0) return (error); tfd = fp->f_data; if (fp->f_type != DTYPE_LINUXTFD || tfd == NULL) { error = EINVAL; goto out; } mtx_lock(&tfd->tfd_lock); linux_timerfd_curval(tfd, &ots); mtx_unlock(&tfd->tfd_lock); error = native_to_linux_itimerspec(&lots, &ots); if (error == 0) error = copyout(&lots, args->old_value, sizeof(lots)); out: fdrop(fp, td); return (error); } int linux_timerfd_settime(struct thread *td, struct linux_timerfd_settime_args *args) { struct l_itimerspec lots; struct itimerspec nts, ots; struct timespec cts, ts; struct timerfd *tfd; struct timeval tv; struct file *fp; int error; if ((args->flags & ~LINUX_TFD_SETTIME_FLAGS) != 0) return (EINVAL); error = copyin(args->new_value, &lots, sizeof(lots)); if (error != 0) return (error); error = linux_to_native_itimerspec(&nts, &lots); if (error != 0) return (error); error = fget(td, args->fd, &cap_write_rights, &fp); if (error != 0) return (error); tfd = fp->f_data; if (fp->f_type != DTYPE_LINUXTFD || tfd == NULL) { error = EINVAL; goto out; } mtx_lock(&tfd->tfd_lock); if (!timespecisset(&nts.it_value)) timespecclear(&nts.it_interval); if (args->old_value != NULL) linux_timerfd_curval(tfd, &ots); tfd->tfd_time = nts; if (timespecisset(&nts.it_value)) { linux_timerfd_clocktime(tfd, &cts); ts = nts.it_value; if ((args->flags & LINUX_TFD_TIMER_ABSTIME) == 0) { timespecadd(&tfd->tfd_time.it_value, &cts, &tfd->tfd_time.it_value); } else { timespecsub(&ts, &cts, &ts); } TIMESPEC_TO_TIMEVAL(&tv, &ts); callout_reset(&tfd->tfd_callout, tvtohz(&tv), linux_timerfd_expire, tfd); tfd->tfd_canceled = false; } else { tfd->tfd_canceled = true; callout_stop(&tfd->tfd_callout); } mtx_unlock(&tfd->tfd_lock); if (args->old_value != NULL) { error = native_to_linux_itimerspec(&lots, &ots); if (error == 0) error = copyout(&lots, args->old_value, sizeof(lots)); } out: fdrop(fp, td); return (error); } static void linux_timerfd_expire(void *arg) { struct timespec cts, ts; struct timeval tv; struct timerfd *tfd; tfd = (struct timerfd *)arg; linux_timerfd_clocktime(tfd, &cts); if (timespeccmp(&cts, &tfd->tfd_time.it_value, >=)) { if (timespecisset(&tfd->tfd_time.it_interval)) timespecadd(&tfd->tfd_time.it_value, &tfd->tfd_time.it_interval, &tfd->tfd_time.it_value); else /* single shot timer */ timespecclear(&tfd->tfd_time.it_value); if 
(timespecisset(&tfd->tfd_time.it_value)) { timespecsub(&tfd->tfd_time.it_value, &cts, &ts); TIMESPEC_TO_TIMEVAL(&tv, &ts); callout_reset(&tfd->tfd_callout, tvtohz(&tv), linux_timerfd_expire, tfd); } tfd->tfd_count++; KNOTE_LOCKED(&tfd->tfd_sel.si_note, 0); selwakeup(&tfd->tfd_sel); wakeup(&tfd->tfd_count); } else if (timespecisset(&tfd->tfd_time.it_value)) { timespecsub(&tfd->tfd_time.it_value, &cts, &ts); TIMESPEC_TO_TIMEVAL(&tv, &ts); callout_reset(&tfd->tfd_callout, tvtohz(&tv), linux_timerfd_expire, tfd); } }
Index: projects/clang700-import/sys/contrib/ipfilter/netinet/ip_fil_freebsd.c =================================================================== --- projects/clang700-import/sys/contrib/ipfilter/netinet/ip_fil_freebsd.c (revision 340917) +++ projects/clang700-import/sys/contrib/ipfilter/netinet/ip_fil_freebsd.c (revision 340918) @@ -1,1473 +1,1472 @@ /* $FreeBSD$ */ /* * Copyright (C) 2012 by Darren Reed. * * See the IPFILTER.LICENCE file for details on licencing. */ #if !defined(lint) static const char sccsid[] = "@(#)ip_fil.c 2.41 6/5/96 (C) 1993-2000 Darren Reed"; static const char rcsid[] = "@(#)$Id$"; #endif #if defined(KERNEL) || defined(_KERNEL) # undef KERNEL # undef _KERNEL # define KERNEL 1 # define _KERNEL 1 #endif #if defined(__FreeBSD_version) && (__FreeBSD_version >= 400000) && \ !defined(KLD_MODULE) && !defined(IPFILTER_LKM) # include "opt_inet6.h" #endif #if defined(__FreeBSD_version) && (__FreeBSD_version >= 440000) && \ !defined(KLD_MODULE) && !defined(IPFILTER_LKM) # include "opt_random_ip_id.h" #endif #include #include #include #include # include # include #include #include # include #if defined(__FreeBSD_version) && (__FreeBSD_version >= 800000) #include #endif # include # include # include #if !defined(__hpux) # include #endif #include # include # include #include # include # include #include #include #include #include #include #include #include #include #include #include #include #include #include "netinet/ip_compat.h" #ifdef USE_INET6 # include #endif #include "netinet/ip_fil.h" #include "netinet/ip_nat.h" #include "netinet/ip_frag.h" #include "netinet/ip_state.h" #include "netinet/ip_proxy.h" #include "netinet/ip_auth.h" #include "netinet/ip_sync.h" #include "netinet/ip_lookup.h" #include "netinet/ip_dstlist.h" #ifdef IPFILTER_SCAN #include "netinet/ip_scan.h" #endif #include "netinet/ip_pool.h" # include #include #ifdef CSUM_DATA_VALID #include #endif extern int ip_optcopy __P((struct ip *, struct ip *)); # ifdef IPFILTER_M_IPFILTER MALLOC_DEFINE(M_IPFILTER, "ipfilter", "IP Filter packet filter data structures"); # endif static int ipf_send_ip __P((fr_info_t *, mb_t *)); static void ipf_timer_func __P((void *arg)); VNET_DEFINE(ipf_main_softc_t, ipfmain) = { .ipf_running = -2, }; #define V_ipfmain VNET(ipfmain) # include # if defined(NETBSD_PF) # include # endif /* NETBSD_PF */ static eventhandler_tag ipf_arrivetag, ipf_departtag; #if 0 /* * Disable the "cloner" event handler; we are getting interface * events before the firewall is fully initialized and also with no vnet * information, thus leading to uninitialised memory accesses. * In addition it is unclear why we need it in the first place. * If it turns out to be needed, we'll need a dedicated event handler * for it to deal with the ifc and the correct vnet.
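 * (Only the ifnet_arrival_event and ifnet_departure_event handlers,
 * whose tags are declared above, are actually registered; see
 * ipf_event_reg() at the bottom of this file.)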
*/ static eventhandler_tag ipf_clonetag; #endif static void ipf_ifevent(void *arg, struct ifnet *ifp); static void ipf_ifevent(arg, ifp) void *arg; struct ifnet *ifp; { CURVNET_SET(ifp->if_vnet); if (V_ipfmain.ipf_running > 0) ipf_sync(&V_ipfmain, NULL); CURVNET_RESTORE(); }
static int ipf_check_wrapper(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir) { struct ip *ip = mtod(*mp, struct ip *); int rv; /* * IPFilter expects everything in network byte order */ #if (__FreeBSD_version < 1000019) ip->ip_len = htons(ip->ip_len); ip->ip_off = htons(ip->ip_off); #endif CURVNET_SET(ifp->if_vnet); rv = ipf_check(&V_ipfmain, ip, ip->ip_hl << 2, ifp, (dir == PFIL_OUT), mp); CURVNET_RESTORE(); #if (__FreeBSD_version < 1000019) if ((rv == 0) && (*mp != NULL)) { ip = mtod(*mp, struct ip *); ip->ip_len = ntohs(ip->ip_len); ip->ip_off = ntohs(ip->ip_off); } #endif return rv; }
# ifdef USE_INET6 # include static int ipf_check_wrapper6(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir) { int error; CURVNET_SET(ifp->if_vnet); error = ipf_check(&V_ipfmain, mtod(*mp, struct ip *), sizeof(struct ip6_hdr), ifp, (dir == PFIL_OUT), mp); CURVNET_RESTORE(); return (error); } # endif
#if defined(IPFILTER_LKM) int ipf_identify(s) char *s; { if (strcmp(s, "ipl") == 0) return 1; return 0; } #endif /* IPFILTER_LKM */
static void ipf_timer_func(arg) void *arg; { ipf_main_softc_t *softc = arg; SPL_INT(s); SPL_NET(s); READ_ENTER(&softc->ipf_global); if (softc->ipf_running > 0) ipf_slowtimer(softc); if (softc->ipf_running == -1 || softc->ipf_running == 1) { #if 0 softc->ipf_slow_ch = timeout(ipf_timer_func, softc, hz/2); #endif callout_init(&softc->ipf_slow_ch, 1); callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT, ipf_timer_func, softc); } RWLOCK_EXIT(&softc->ipf_global); SPL_X(s); }
int ipfattach(softc) ipf_main_softc_t *softc; { #ifdef USE_SPL int s; #endif SPL_NET(s); if (softc->ipf_running > 0) { SPL_X(s); return EBUSY; } if (ipf_init_all(softc) < 0) { SPL_X(s); return EIO; } bzero((char *)V_ipfmain.ipf_selwait, sizeof(V_ipfmain.ipf_selwait)); softc->ipf_running = 1; if (softc->ipf_control_forwarding & 1) V_ipforwarding = 1; SPL_X(s); #if 0 softc->ipf_slow_ch = timeout(ipf_timer_func, softc, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT); #endif callout_init(&softc->ipf_slow_ch, 1); callout_reset(&softc->ipf_slow_ch, (hz / IPF_HZ_DIVIDE) * IPF_HZ_MULT, ipf_timer_func, softc); return 0; }
/* * Disable the filter by removing the hooks from the IP input/output * stream. */ int ipfdetach(softc) ipf_main_softc_t *softc; { #ifdef USE_SPL int s; #endif if (softc->ipf_control_forwarding & 2) V_ipforwarding = 0; SPL_NET(s); #if 0 if (softc->ipf_slow_ch.callout != NULL) untimeout(ipf_timer_func, softc, softc->ipf_slow_ch); bzero(&softc->ipf_slow, sizeof(softc->ipf_slow)); #endif callout_drain(&softc->ipf_slow_ch); ipf_fini_all(softc); softc->ipf_running = -2; SPL_X(s); return 0; }
/* * Filter ioctl interface.
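 * Requests are rejected with EPERM when the device was opened for
 * writing at securelevel 3 or higher, with ENXIO for minor numbers
 * outside 0..IPL_LOGMAX, and with EIO for most requests while the
 * filter is not running.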
*/ int -ipfioctl(dev, cmd, data, mode -, p) +ipfioctl(dev, cmd, data, mode, p) struct thread *p; # define p_cred td_ucred # define p_uid td_ucred->cr_ruid struct cdev *dev; ioctlcmd_t cmd; caddr_t data; int mode; { int error = 0, unit = 0; SPL_INT(s); CURVNET_SET(TD_TO_VNET(p)); #if (BSD >= 199306) if (securelevel_ge(p->p_cred, 3) && (mode & FWRITE)) { V_ipfmain.ipf_interror = 130001; CURVNET_RESTORE(); return EPERM; } #endif unit = GET_MINOR(dev); if ((IPL_LOGMAX < unit) || (unit < 0)) { V_ipfmain.ipf_interror = 130002; CURVNET_RESTORE(); return ENXIO; } if (V_ipfmain.ipf_running <= 0) { if (unit != IPL_LOGIPF && cmd != SIOCIPFINTERROR) { V_ipfmain.ipf_interror = 130003; CURVNET_RESTORE(); return EIO; } if (cmd != SIOCIPFGETNEXT && cmd != SIOCIPFGET && cmd != SIOCIPFSET && cmd != SIOCFRENB && cmd != SIOCGETFS && cmd != SIOCGETFF && cmd != SIOCIPFINTERROR) { V_ipfmain.ipf_interror = 130004; CURVNET_RESTORE(); return EIO; } } SPL_NET(s); error = ipf_ioctlswitch(&V_ipfmain, unit, data, cmd, mode, p->p_uid, p); CURVNET_RESTORE(); if (error != -1) { SPL_X(s); return error; } SPL_X(s); return error; } /* * ipf_send_reset - this could conceivably be a call to tcp_respond(), but that * requires a large amount of setting up and isn't any more efficient. */ int ipf_send_reset(fin) fr_info_t *fin; { struct tcphdr *tcp, *tcp2; int tlen = 0, hlen; struct mbuf *m; #ifdef USE_INET6 ip6_t *ip6; #endif ip_t *ip; tcp = fin->fin_dp; if (tcp->th_flags & TH_RST) return -1; /* feedback loop */ if (ipf_checkl4sum(fin) == -1) return -1; tlen = fin->fin_dlen - (TCP_OFF(tcp) << 2) + ((tcp->th_flags & TH_SYN) ? 1 : 0) + ((tcp->th_flags & TH_FIN) ? 1 : 0); #ifdef USE_INET6 hlen = (fin->fin_v == 6) ? sizeof(ip6_t) : sizeof(ip_t); #else hlen = sizeof(ip_t); #endif #ifdef MGETHDR MGETHDR(m, M_NOWAIT, MT_HEADER); #else MGET(m, M_NOWAIT, MT_HEADER); #endif if (m == NULL) return -1; if (sizeof(*tcp2) + hlen > MLEN) { if (!(MCLGET(m, M_NOWAIT))) { FREE_MB_T(m); return -1; } } m->m_len = sizeof(*tcp2) + hlen; #if (BSD >= 199103) m->m_data += max_linkhdr; m->m_pkthdr.len = m->m_len; m->m_pkthdr.rcvif = (struct ifnet *)0; #endif ip = mtod(m, struct ip *); bzero((char *)ip, hlen); #ifdef USE_INET6 ip6 = (ip6_t *)ip; #endif tcp2 = (struct tcphdr *)((char *)ip + hlen); tcp2->th_sport = tcp->th_dport; tcp2->th_dport = tcp->th_sport; if (tcp->th_flags & TH_ACK) { tcp2->th_seq = tcp->th_ack; tcp2->th_flags = TH_RST; tcp2->th_ack = 0; } else { tcp2->th_seq = 0; tcp2->th_ack = ntohl(tcp->th_seq); tcp2->th_ack += tlen; tcp2->th_ack = htonl(tcp2->th_ack); tcp2->th_flags = TH_RST|TH_ACK; } TCP_X2_A(tcp2, 0); TCP_OFF_A(tcp2, sizeof(*tcp2) >> 2); tcp2->th_win = tcp->th_win; tcp2->th_sum = 0; tcp2->th_urp = 0; #ifdef USE_INET6 if (fin->fin_v == 6) { ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow; ip6->ip6_plen = htons(sizeof(struct tcphdr)); ip6->ip6_nxt = IPPROTO_TCP; ip6->ip6_hlim = 0; ip6->ip6_src = fin->fin_dst6.in6; ip6->ip6_dst = fin->fin_src6.in6; tcp2->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(*ip6), sizeof(*tcp2)); return ipf_send_ip(fin, m); } #endif ip->ip_p = IPPROTO_TCP; ip->ip_len = htons(sizeof(struct tcphdr)); ip->ip_src.s_addr = fin->fin_daddr; ip->ip_dst.s_addr = fin->fin_saddr; tcp2->th_sum = in_cksum(m, hlen + sizeof(*tcp2)); ip->ip_len = htons(hlen + sizeof(*tcp2)); return ipf_send_ip(fin, m); } /* * ip_len must be in network byte order when called. 
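 * (ipf_send_reset() above does exactly that, setting
 * ip->ip_len = htons(hlen + sizeof(*tcp2)) just before calling here.)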
*/ static int ipf_send_ip(fin, m) fr_info_t *fin; mb_t *m; { fr_info_t fnew; ip_t *ip, *oip; int hlen; ip = mtod(m, ip_t *); bzero((char *)&fnew, sizeof(fnew)); fnew.fin_main_soft = fin->fin_main_soft; IP_V_A(ip, fin->fin_v); switch (fin->fin_v) { case 4 : oip = fin->fin_ip; hlen = sizeof(*oip); fnew.fin_v = 4; fnew.fin_p = ip->ip_p; fnew.fin_plen = ntohs(ip->ip_len); IP_HL_A(ip, sizeof(*oip) >> 2); ip->ip_tos = oip->ip_tos; ip->ip_id = fin->fin_ip->ip_id; #if defined(FreeBSD) && (__FreeBSD_version > 460000) ip->ip_off = htons(path_mtu_discovery ? IP_DF : 0); #else ip->ip_off = 0; #endif ip->ip_ttl = V_ip_defttl; ip->ip_sum = 0; break; #ifdef USE_INET6 case 6 : { ip6_t *ip6 = (ip6_t *)ip; ip6->ip6_vfc = 0x60; ip6->ip6_hlim = IPDEFTTL; hlen = sizeof(*ip6); fnew.fin_p = ip6->ip6_nxt; fnew.fin_v = 6; fnew.fin_plen = ntohs(ip6->ip6_plen) + hlen; break; } #endif default : return EINVAL; } #ifdef IPSEC m->m_pkthdr.rcvif = NULL; #endif fnew.fin_ifp = fin->fin_ifp; fnew.fin_flx = FI_NOCKSUM; fnew.fin_m = m; fnew.fin_ip = ip; fnew.fin_mp = &m; fnew.fin_hlen = hlen; fnew.fin_dp = (char *)ip + hlen; (void) ipf_makefrip(hlen, ip, &fnew); return ipf_fastroute(m, &m, &fnew, NULL); } int ipf_send_icmp_err(type, fin, dst) int type; fr_info_t *fin; int dst; { int err, hlen, xtra, iclen, ohlen, avail, code; struct in_addr dst4; struct icmp *icmp; struct mbuf *m; i6addr_t dst6; void *ifp; #ifdef USE_INET6 ip6_t *ip6; #endif ip_t *ip, *ip2; if ((type < 0) || (type >= ICMP_MAXTYPE)) return -1; code = fin->fin_icode; #ifdef USE_INET6 #if 0 /* XXX Fix an off by one error: s/>/>=/ was: if ((code < 0) || (code > sizeof(icmptoicmp6unreach)/sizeof(int))) Fix obtained from NetBSD ip_fil_netbsd.c r1.4: */ #endif if ((code < 0) || (code >= sizeof(icmptoicmp6unreach)/sizeof(int))) return -1; #endif if (ipf_checkl4sum(fin) == -1) return -1; #ifdef MGETHDR MGETHDR(m, M_NOWAIT, MT_HEADER); #else MGET(m, M_NOWAIT, MT_HEADER); #endif if (m == NULL) return -1; avail = MHLEN; xtra = 0; hlen = 0; ohlen = 0; dst4.s_addr = 0; ifp = fin->fin_ifp; if (fin->fin_v == 4) { if ((fin->fin_p == IPPROTO_ICMP) && !(fin->fin_flx & FI_SHORT)) switch (ntohs(fin->fin_data[0]) >> 8) { case ICMP_ECHO : case ICMP_TSTAMP : case ICMP_IREQ : case ICMP_MASKREQ : break; default : FREE_MB_T(m); return 0; } if (dst == 0) { if (ipf_ifpaddr(&V_ipfmain, 4, FRI_NORMAL, ifp, &dst6, NULL) == -1) { FREE_MB_T(m); return -1; } dst4 = dst6.in4; } else dst4.s_addr = fin->fin_daddr; hlen = sizeof(ip_t); ohlen = fin->fin_hlen; iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen; if (fin->fin_hlen < fin->fin_plen) xtra = MIN(fin->fin_dlen, 8); else xtra = 0; } #ifdef USE_INET6 else if (fin->fin_v == 6) { hlen = sizeof(ip6_t); ohlen = sizeof(ip6_t); iclen = hlen + offsetof(struct icmp, icmp_ip) + ohlen; type = icmptoicmp6types[type]; if (type == ICMP6_DST_UNREACH) code = icmptoicmp6unreach[code]; if (iclen + max_linkhdr + fin->fin_plen > avail) { if (!(MCLGET(m, M_NOWAIT))) { FREE_MB_T(m); return -1; } avail = MCLBYTES; } xtra = MIN(fin->fin_plen, avail - iclen - max_linkhdr); xtra = MIN(xtra, IPV6_MMTU - iclen); if (dst == 0) { if (ipf_ifpaddr(&V_ipfmain, 6, FRI_NORMAL, ifp, &dst6, NULL) == -1) { FREE_MB_T(m); return -1; } } else dst6 = fin->fin_dst6; } #endif else { FREE_MB_T(m); return -1; } avail -= (max_linkhdr + iclen); if (avail < 0) { FREE_MB_T(m); return -1; } if (xtra > avail) xtra = avail; iclen += xtra; m->m_data += max_linkhdr; m->m_pkthdr.rcvif = (struct ifnet *)0; m->m_pkthdr.len = iclen; m->m_len = iclen; ip = mtod(m, ip_t *); icmp = (struct icmp 
*)((char *)ip + hlen); ip2 = (ip_t *)&icmp->icmp_ip; icmp->icmp_type = type; icmp->icmp_code = fin->fin_icode; icmp->icmp_cksum = 0; #ifdef icmp_nextmtu if (type == ICMP_UNREACH && fin->fin_icode == ICMP_UNREACH_NEEDFRAG) { if (fin->fin_mtu != 0) { icmp->icmp_nextmtu = htons(fin->fin_mtu); } else if (ifp != NULL) { icmp->icmp_nextmtu = htons(GETIFMTU_4(ifp)); } else { /* make up a number... */ icmp->icmp_nextmtu = htons(fin->fin_plen - 20); } } #endif bcopy((char *)fin->fin_ip, (char *)ip2, ohlen); #ifdef USE_INET6 ip6 = (ip6_t *)ip; if (fin->fin_v == 6) { ip6->ip6_flow = ((ip6_t *)fin->fin_ip)->ip6_flow; ip6->ip6_plen = htons(iclen - hlen); ip6->ip6_nxt = IPPROTO_ICMPV6; ip6->ip6_hlim = 0; ip6->ip6_src = dst6.in6; ip6->ip6_dst = fin->fin_src6.in6; if (xtra > 0) bcopy((char *)fin->fin_ip + ohlen, (char *)&icmp->icmp_ip + ohlen, xtra); icmp->icmp_cksum = in6_cksum(m, IPPROTO_ICMPV6, sizeof(*ip6), iclen - hlen); } else #endif { ip->ip_p = IPPROTO_ICMP; ip->ip_src.s_addr = dst4.s_addr; ip->ip_dst.s_addr = fin->fin_saddr; if (xtra > 0) bcopy((char *)fin->fin_ip + ohlen, (char *)&icmp->icmp_ip + ohlen, xtra); icmp->icmp_cksum = ipf_cksum((u_short *)icmp, sizeof(*icmp) + 8); ip->ip_len = htons(iclen); ip->ip_p = IPPROTO_ICMP; } err = ipf_send_ip(fin, m); return err; } /* * m0 - pointer to mbuf where the IP packet starts * mpp - pointer to the mbuf pointer that is the start of the mbuf chain */ int ipf_fastroute(m0, mpp, fin, fdp) mb_t *m0, **mpp; fr_info_t *fin; frdest_t *fdp; { register struct ip *ip, *mhip; register struct mbuf *m = *mpp; int len, off, error = 0, hlen, code; struct ifnet *ifp, *sifp; struct sockaddr_in dst; struct nhop4_extended nh4; int has_nhop = 0; u_long fibnum = 0; u_short ip_off; frdest_t node; frentry_t *fr; #ifdef M_WRITABLE /* * HOT FIX/KLUDGE: * * If the mbuf we're about to send is not writable (because of * a cluster reference, for example) we'll need to make a copy * of it since this routine modifies the contents. * * If you have non-crappy network hardware that can transmit data * from the mbuf, rather than making a copy, this is gonna be a * problem. */ if (M_WRITABLE(m) == 0) { m0 = m_dup(m, M_NOWAIT); if (m0 != NULL) { FREE_MB_T(m); m = m0; *mpp = m; } else { error = ENOBUFS; FREE_MB_T(m); goto done; } } #endif #ifdef USE_INET6 if (fin->fin_v == 6) { /* * currently "to " and "to :ip#" are not supported * for IPv6 */ return ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL); } #endif hlen = fin->fin_hlen; ip = mtod(m0, struct ip *); ifp = NULL; /* * Route packet. */ bzero(&dst, sizeof (dst)); dst.sin_family = AF_INET; dst.sin_addr = ip->ip_dst; dst.sin_len = sizeof(dst); fr = fin->fin_fr; if ((fr != NULL) && !(fr->fr_flags & FR_KEEPSTATE) && (fdp != NULL) && (fdp->fd_type == FRD_DSTLIST)) { if (ipf_dstlist_select_node(fin, fdp->fd_ptr, NULL, &node) == 0) fdp = &node; } if (fdp != NULL) ifp = fdp->fd_ptr; else ifp = fin->fin_ifp; if ((ifp == NULL) && ((fr == NULL) || !(fr->fr_flags & FR_FASTROUTE))) { error = -2; goto bad; } if ((fdp != NULL) && (fdp->fd_ip.s_addr != 0)) dst.sin_addr = fdp->fd_ip; fibnum = M_GETFIB(m0); if (fib4_lookup_nh_ext(fibnum, dst.sin_addr, NHR_REF, 0, &nh4) != 0) { if (in_localaddr(ip->ip_dst)) error = EHOSTUNREACH; else error = ENETUNREACH; goto bad; } has_nhop = 1; if (ifp == NULL) ifp = nh4.nh_ifp; if (nh4.nh_flags & NHF_GATEWAY) dst.sin_addr = nh4.nh_addr; /* * For input packets which are being "fastrouted", they won't * go back through output filtering and miss their chance to get * NAT'd and counted. 
Duplicated packets aren't considered to be * part of the normal packet stream, so do not NAT them or pass * them through stateful checking, etc. */ if ((fdp != &fr->fr_dif) && (fin->fin_out == 0)) { sifp = fin->fin_ifp; fin->fin_ifp = ifp; fin->fin_out = 1; (void) ipf_acctpkt(fin, NULL); fin->fin_fr = NULL; if (!fr || !(fr->fr_flags & FR_RETMASK)) { u_32_t pass; (void) ipf_state_check(fin, &pass); } switch (ipf_nat_checkout(fin, NULL)) { case 0 : break; case 1 : ip->ip_sum = 0; break; case -1 : error = -1; goto bad; break; } fin->fin_ifp = sifp; fin->fin_out = 0; } else ip->ip_sum = 0; /* * If small enough for interface, can just send directly. */ if (ntohs(ip->ip_len) <= ifp->if_mtu) { if (!ip->ip_sum) ip->ip_sum = in_cksum(m, hlen); error = (*ifp->if_output)(ifp, m, (struct sockaddr *)&dst, NULL ); goto done; } /* * Too large for interface; fragment if possible. * Must be able to put at least 8 bytes per fragment. */ ip_off = ntohs(ip->ip_off); if (ip_off & IP_DF) { error = EMSGSIZE; goto bad; } len = (ifp->if_mtu - hlen) &~ 7; if (len < 8) { error = EMSGSIZE; goto bad; } { int mhlen, firstlen = len; struct mbuf **mnext = &m->m_act; /* * Loop through length of segment after first fragment, * make new header and copy data of each part and link onto chain. */ m0 = m; mhlen = sizeof (struct ip); for (off = hlen + len; off < ntohs(ip->ip_len); off += len) { #ifdef MGETHDR MGETHDR(m, M_NOWAIT, MT_HEADER); #else MGET(m, M_NOWAIT, MT_HEADER); #endif if (m == NULL) { m = m0; error = ENOBUFS; goto bad; } m->m_data += max_linkhdr; mhip = mtod(m, struct ip *); bcopy((char *)ip, (char *)mhip, sizeof(*ip)); if (hlen > sizeof (struct ip)) { mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip); IP_HL_A(mhip, mhlen >> 2); } m->m_len = mhlen; mhip->ip_off = ((off - hlen) >> 3) + ip_off; if (off + len >= ntohs(ip->ip_len)) len = ntohs(ip->ip_len) - off; else mhip->ip_off |= IP_MF; mhip->ip_len = htons((u_short)(len + mhlen)); *mnext = m; m->m_next = m_copym(m0, off, len, M_NOWAIT); if (m->m_next == 0) { error = ENOBUFS; /* ??? */ goto sendorfree; } m->m_pkthdr.len = mhlen + len; m->m_pkthdr.rcvif = NULL; mhip->ip_off = htons((u_short)mhip->ip_off); mhip->ip_sum = 0; mhip->ip_sum = in_cksum(m, mhlen); mnext = &m->m_act; } /* * Update first fragment by trimming what's been copied out * and updating header, then send each fragment (in order). 
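 * Worked example (assumed numbers, for illustration only): with an
 * if_mtu of 1500 and an hlen of 20, len = (1500 - 20) & ~7 = 1480, so a
 * 4000-byte packet leaves here as fragments carrying 1480, 1480 and
 * 1020 payload bytes at fragment offsets 0, 185 and 370 (in 8-byte
 * units).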
*/ m_adj(m0, hlen + firstlen - ip->ip_len); ip->ip_len = htons((u_short)(hlen + firstlen)); ip->ip_off = htons((u_short)IP_MF); ip->ip_sum = 0; ip->ip_sum = in_cksum(m0, hlen); sendorfree: for (m = m0; m; m = m0) { m0 = m->m_act; m->m_act = 0; if (error == 0) error = (*ifp->if_output)(ifp, m, (struct sockaddr *)&dst, NULL ); else FREE_MB_T(m); } } done: if (!error) V_ipfmain.ipf_frouteok[0]++; else V_ipfmain.ipf_frouteok[1]++; if (has_nhop) fib4_free_nh_ext(fibnum, &nh4); return 0; bad: if (error == EMSGSIZE) { sifp = fin->fin_ifp; code = fin->fin_icode; fin->fin_icode = ICMP_UNREACH_NEEDFRAG; fin->fin_ifp = ifp; (void) ipf_send_icmp_err(ICMP_UNREACH, fin, 1); fin->fin_ifp = sifp; fin->fin_icode = code; } FREE_MB_T(m); goto done; } int ipf_verifysrc(fin) fr_info_t *fin; { struct nhop4_basic nh4; if (fib4_lookup_nh_basic(0, fin->fin_src, 0, 0, &nh4) != 0) return (0); return (fin->fin_ifp == nh4.nh_ifp); } /* * return the first IP Address associated with an interface */ int ipf_ifpaddr(softc, v, atype, ifptr, inp, inpmask) ipf_main_softc_t *softc; int v, atype; void *ifptr; i6addr_t *inp, *inpmask; { #ifdef USE_INET6 struct in6_addr *inp6 = NULL; #endif struct sockaddr *sock, *mask; struct sockaddr_in *sin; struct ifaddr *ifa; struct ifnet *ifp; if ((ifptr == NULL) || (ifptr == (void *)-1)) return -1; sin = NULL; ifp = ifptr; if (v == 4) inp->in4.s_addr = 0; #ifdef USE_INET6 else if (v == 6) bzero((char *)inp, sizeof(*inp)); #endif ifa = CK_STAILQ_FIRST(&ifp->if_addrhead); sock = ifa->ifa_addr; while (sock != NULL && ifa != NULL) { sin = (struct sockaddr_in *)sock; if ((v == 4) && (sin->sin_family == AF_INET)) break; #ifdef USE_INET6 if ((v == 6) && (sin->sin_family == AF_INET6)) { inp6 = &((struct sockaddr_in6 *)sin)->sin6_addr; if (!IN6_IS_ADDR_LINKLOCAL(inp6) && !IN6_IS_ADDR_LOOPBACK(inp6)) break; } #endif ifa = CK_STAILQ_NEXT(ifa, ifa_link); if (ifa != NULL) sock = ifa->ifa_addr; } if (ifa == NULL || sin == NULL) return -1; mask = ifa->ifa_netmask; if (atype == FRI_BROADCAST) sock = ifa->ifa_broadaddr; else if (atype == FRI_PEERADDR) sock = ifa->ifa_dstaddr; if (sock == NULL) return -1; #ifdef USE_INET6 if (v == 6) { return ipf_ifpfillv6addr(atype, (struct sockaddr_in6 *)sock, (struct sockaddr_in6 *)mask, inp, inpmask); } #endif return ipf_ifpfillv4addr(atype, (struct sockaddr_in *)sock, (struct sockaddr_in *)mask, &inp->in4, &inpmask->in4); } u_32_t ipf_newisn(fin) fr_info_t *fin; { u_32_t newiss; newiss = arc4random(); return newiss; } INLINE int ipf_checkv4sum(fin) fr_info_t *fin; { #ifdef CSUM_DATA_VALID int manual = 0; u_short sum; ip_t *ip; mb_t *m; if ((fin->fin_flx & FI_NOCKSUM) != 0) return 0; if ((fin->fin_flx & FI_SHORT) != 0) return 1; if (fin->fin_cksum != FI_CK_NEEDED) return (fin->fin_cksum > FI_CK_NEEDED) ? 
0 : -1; m = fin->fin_m; if (m == NULL) { manual = 1; goto skipauto; } ip = fin->fin_ip; if ((m->m_pkthdr.csum_flags & (CSUM_IP_CHECKED|CSUM_IP_VALID)) == CSUM_IP_CHECKED) { fin->fin_cksum = FI_CK_BAD; fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_checkv4sum_csum_ip_checked, fr_info_t *, fin, u_int, m->m_pkthdr.csum_flags & (CSUM_IP_CHECKED|CSUM_IP_VALID)); return -1; } if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { /* Depending on the driver, UDP may have zero checksum */ if (fin->fin_p == IPPROTO_UDP && (fin->fin_flx & (FI_FRAG|FI_SHORT|FI_BAD)) == 0) { udphdr_t *udp = fin->fin_dp; if (udp->uh_sum == 0) { /* * we're good no matter what the hardware * checksum flags and csum_data say (handling * of csum_data for zero UDP checksum is not * consistent across all drivers) */ fin->fin_cksum = 1; return 0; } } if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) sum = m->m_pkthdr.csum_data; else sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htonl(m->m_pkthdr.csum_data + fin->fin_dlen + fin->fin_p)); sum ^= 0xffff; if (sum != 0) { fin->fin_cksum = FI_CK_BAD; fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_checkv4sum_sum, fr_info_t *, fin, u_int, sum); } else { fin->fin_cksum = FI_CK_SUMOK; return 0; } } else { if (m->m_pkthdr.csum_flags == CSUM_DELAY_DATA) { fin->fin_cksum = FI_CK_L4FULL; return 0; } else if (m->m_pkthdr.csum_flags == CSUM_TCP || m->m_pkthdr.csum_flags == CSUM_UDP) { fin->fin_cksum = FI_CK_L4PART; return 0; } else if (m->m_pkthdr.csum_flags == CSUM_IP) { fin->fin_cksum = FI_CK_L4PART; return 0; } else { manual = 1; } } skipauto: if (manual != 0) { if (ipf_checkl4sum(fin) == -1) { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_checkv4sum_manual, fr_info_t *, fin, u_int, manual); return -1; } } #else if (ipf_checkl4sum(fin) == -1) { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_checkv4sum_checkl4sum, fr_info_t *, fin, u_int, -1); return -1; } #endif return 0; } #ifdef USE_INET6 INLINE int ipf_checkv6sum(fin) fr_info_t *fin; { if ((fin->fin_flx & FI_NOCKSUM) != 0) { DT(ipf_checkv6sum_fi_nocksum); return 0; } if ((fin->fin_flx & FI_SHORT) != 0) { DT(ipf_checkv6sum_fi_short); return 1; } if (fin->fin_cksum != FI_CK_NEEDED) { DT(ipf_checkv6sum_fi_ck_needed); return (fin->fin_cksum > FI_CK_NEEDED) ? 0 : -1; } if (ipf_checkl4sum(fin) == -1) { fin->fin_flx |= FI_BAD; DT2(ipf_fi_bad_checkv6sum_checkl4sum, fr_info_t *, fin, u_int, -1); return -1; } return 0; } #endif /* USE_INET6 */ size_t mbufchainlen(m0) struct mbuf *m0; { size_t len; if ((m0->m_flags & M_PKTHDR) != 0) { len = m0->m_pkthdr.len; } else { struct mbuf *m; for (m = m0, len = 0; m != NULL; m = m->m_next) len += m->m_len; } return len; } /* ------------------------------------------------------------------------ */ /* Function: ipf_pullup */ /* Returns: NULL == pullup failed, else pointer to protocol header */ /* Parameters: xmin(I)- pointer to buffer where data packet starts */ /* fin(I) - pointer to packet information */ /* len(I) - number of bytes to pullup */ /* */ /* Attempt to move at least len bytes (from the start of the buffer) into a */ /* single buffer for ease of access. Operating system native functions are */ /* used to manage buffers - if necessary. If the entire packet ends up in */ /* a single buffer, set the FI_COALESCE flag even though ipf_coalesce() has */ /* not been called. Both fin_ip and fin_dp are updated before exiting _IF_ */ /* and ONLY if the pullup succeeds. */ /* */ /* We assume that 'xmin' is a pointer to a buffer that is part of the chain */ /* of buffers that starts at *fin->fin_mp. 
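[Editor's sketch] As the comment above stresses, fin_ip and fin_dp are refreshed only if the pullup succeeds, so callers must re-read them through fin rather than cache pointers across the call. A hedged sketch of the usual calling pattern; the TCP use case and the fin_hlen field are assumptions about ipfilter's fr_info_t, shown only to illustrate the contract:

	tcphdr_t *tcp;

	/* Ensure the whole TCP header is contiguous before dereferencing. */
	if (M_LEN(fin->fin_m) < fin->fin_hlen + sizeof(tcphdr_t) &&
	    ipf_pullup(fin->fin_m, fin, fin->fin_hlen + sizeof(tcphdr_t)) == NULL)
		return;			/* chain was freed; fin->fin_m is NULL */
	tcp = (tcphdr_t *)fin->fin_dp;	/* safe: updated by a successful pullup */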
*/ /* ------------------------------------------------------------------------ */ void * ipf_pullup(xmin, fin, len) mb_t *xmin; fr_info_t *fin; int len; { int dpoff, ipoff; mb_t *m = xmin; char *ip; if (m == NULL) return NULL; ip = (char *)fin->fin_ip; if ((fin->fin_flx & FI_COALESCE) != 0) return ip; ipoff = fin->fin_ipoff; if (fin->fin_dp != NULL) dpoff = (char *)fin->fin_dp - (char *)ip; else dpoff = 0; if (M_LEN(m) < len) { mb_t *n = *fin->fin_mp; /* * Assume that M_PKTHDR is set and just work with what is left * rather than check.. * Should not make any real difference, anyway. */ if (m != n) { /* * Record the mbuf that points to the mbuf that we're * about to go to work on so that we can update the * m_next appropriately later. */ for (; n->m_next != m; n = n->m_next) ; } else { n = NULL; } #ifdef MHLEN if (len > MHLEN) #else if (len > MLEN) #endif { #ifdef HAVE_M_PULLDOWN if (m_pulldown(m, 0, len, NULL) == NULL) m = NULL; #else FREE_MB_T(*fin->fin_mp); m = NULL; n = NULL; #endif } else { m = m_pullup(m, len); } if (n != NULL) n->m_next = m; if (m == NULL) { /* * When n is non-NULL, it indicates that m pointed to * a sub-chain (tail) of the mbuf and that the head * of this chain has not yet been free'd. */ if (n != NULL) { FREE_MB_T(*fin->fin_mp); } *fin->fin_mp = NULL; fin->fin_m = NULL; return NULL; } if (n == NULL) *fin->fin_mp = m; while (M_LEN(m) == 0) { m = m->m_next; } fin->fin_m = m; ip = MTOD(m, char *) + ipoff; fin->fin_ip = (ip_t *)ip; if (fin->fin_dp != NULL) fin->fin_dp = (char *)fin->fin_ip + dpoff; if (fin->fin_fraghdr != NULL) fin->fin_fraghdr = (char *)ip + ((char *)fin->fin_fraghdr - (char *)fin->fin_ip); } if (len == fin->fin_plen) fin->fin_flx |= FI_COALESCE; return ip; } int ipf_inject(fin, m) fr_info_t *fin; mb_t *m; { int error = 0; if (fin->fin_out == 0) { netisr_dispatch(NETISR_IP, m); } else { fin->fin_ip->ip_len = ntohs(fin->fin_ip->ip_len); fin->fin_ip->ip_off = ntohs(fin->fin_ip->ip_off); error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL); } return error; } int ipf_pfil_unhook(void) { #if defined(NETBSD_PF) && (__FreeBSD_version >= 500011) struct pfil_head *ph_inet; # ifdef USE_INET6 struct pfil_head *ph_inet6; # endif #endif #ifdef NETBSD_PF ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); if (ph_inet != NULL) pfil_remove_hook((void *)ipf_check_wrapper, NULL, PFIL_IN|PFIL_OUT|PFIL_WAITOK, ph_inet); # ifdef USE_INET6 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); if (ph_inet6 != NULL) pfil_remove_hook((void *)ipf_check_wrapper6, NULL, PFIL_IN|PFIL_OUT|PFIL_WAITOK, ph_inet6); # endif #endif return (0); } int ipf_pfil_hook(void) { #if defined(NETBSD_PF) && (__FreeBSD_version >= 500011) struct pfil_head *ph_inet; # ifdef USE_INET6 struct pfil_head *ph_inet6; # endif #endif # ifdef NETBSD_PF ph_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); # ifdef USE_INET6 ph_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); # endif if (ph_inet == NULL # ifdef USE_INET6 && ph_inet6 == NULL # endif ) { return ENODEV; } if (ph_inet != NULL) pfil_add_hook((void *)ipf_check_wrapper, NULL, PFIL_IN|PFIL_OUT|PFIL_WAITOK, ph_inet); # ifdef USE_INET6 if (ph_inet6 != NULL) pfil_add_hook((void *)ipf_check_wrapper6, NULL, PFIL_IN|PFIL_OUT|PFIL_WAITOK, ph_inet6); # endif # endif return (0); } void ipf_event_reg(void) { ipf_arrivetag = EVENTHANDLER_REGISTER(ifnet_arrival_event, \ ipf_ifevent, NULL, \ EVENTHANDLER_PRI_ANY); ipf_departtag = EVENTHANDLER_REGISTER(ifnet_departure_event, \ ipf_ifevent, NULL, \ EVENTHANDLER_PRI_ANY); #if 0 ipf_clonetag = 
EVENTHANDLER_REGISTER(if_clone_event, ipf_ifevent, \ NULL, EVENTHANDLER_PRI_ANY); #endif } void ipf_event_dereg(void) { if (ipf_arrivetag != NULL) { EVENTHANDLER_DEREGISTER(ifnet_arrival_event, ipf_arrivetag); } if (ipf_departtag != NULL) { EVENTHANDLER_DEREGISTER(ifnet_departure_event, ipf_departtag); } #if 0 if (ipf_clonetag != NULL) { EVENTHANDLER_DEREGISTER(if_clone_event, ipf_clonetag); } #endif } u_32_t ipf_random() { return arc4random(); } u_int ipf_pcksum(fin, hlen, sum) fr_info_t *fin; int hlen; u_int sum; { struct mbuf *m; u_int sum2; int off; m = fin->fin_m; off = (char *)fin->fin_dp - (char *)fin->fin_ip; m->m_data += hlen; m->m_len -= hlen; sum2 = in_cksum(fin->fin_m, fin->fin_plen - off); m->m_len += hlen; m->m_data -= hlen; /* * Both sum and sum2 are partial sums, so combine them together. */ sum += ~sum2 & 0xffff; while (sum > 0xffff) sum = (sum & 0xffff) + (sum >> 16); sum2 = ~sum & 0xffff; return sum2; } Index: projects/clang700-import/sys/contrib/ipfilter =================================================================== --- projects/clang700-import/sys/contrib/ipfilter (revision 340917) +++ projects/clang700-import/sys/contrib/ipfilter (revision 340918) Property changes on: projects/clang700-import/sys/contrib/ipfilter ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys/contrib/ipfilter:r340869-340917 Index: projects/clang700-import/sys/dev/atkbdc/atkbdc.c =================================================================== --- projects/clang700-import/sys/dev/atkbdc/atkbdc.c (revision 340917) +++ projects/clang700-import/sys/dev/atkbdc/atkbdc.c (revision 340918) @@ -1,1203 +1,1284 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1996-1999 * Kazutaka YOKOTA (yokota@zodiac.mech.utsunomiya-u.ac.jp) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from kbdio.c,v 1.13 1998/09/25 11:55:46 yokota Exp */ #include __FBSDID("$FreeBSD$"); #include "opt_kbd.h" #include #include #include #include #include #include #include #include #if defined(__amd64__) #include #endif #include #ifdef __sparc64__ #include #include #include #else #include #endif /* constants */ #define MAXKBDC 1 /* XXX */ /* macros */ #ifndef MAX #define MAX(x, y) ((x) > (y) ? (x) : (y)) #endif #define kbdcp(p) ((atkbdc_softc_t *)(p)) #define nextq(i) (((i) + 1) % KBDQ_BUFSIZE) #define availq(q) ((q)->head != (q)->tail) #if KBDIO_DEBUG >= 2 #define emptyq(q) ((q)->tail = (q)->head = (q)->qcount = 0) #else #define emptyq(q) ((q)->tail = (q)->head = 0) #endif #define read_data(k) (bus_space_read_1((k)->iot, (k)->ioh0, 0)) #define read_status(k) (bus_space_read_1((k)->iot, (k)->ioh1, 0)) #define write_data(k, d) \ (bus_space_write_1((k)->iot, (k)->ioh0, 0, (d))) #define write_command(k, d) \ (bus_space_write_1((k)->iot, (k)->ioh1, 0, (d))) /* local variables */ /* * We always need at least one copy of the kbdc_softc struct for the * low-level console. As the low-level console accesses the keyboard * controller before kbdc, and all other devices, is probed, we * statically allocate one entry. XXX */ static atkbdc_softc_t default_kbdc; static atkbdc_softc_t *atkbdc_softc[MAXKBDC] = { &default_kbdc }; static int verbose = KBDIO_DEBUG; #ifdef __sparc64__ static struct bus_space_tag atkbdc_bst_store[MAXKBDC]; #endif /* function prototypes */ static int atkbdc_setup(atkbdc_softc_t *sc, bus_space_tag_t tag, bus_space_handle_t h0, bus_space_handle_t h1); static int addq(kqueue *q, int c); static int removeq(kqueue *q); static int wait_while_controller_busy(atkbdc_softc_t *kbdc); static int wait_for_data(atkbdc_softc_t *kbdc); static int wait_for_kbd_data(atkbdc_softc_t *kbdc); static int wait_for_kbd_ack(atkbdc_softc_t *kbdc); static int wait_for_aux_data(atkbdc_softc_t *kbdc); static int wait_for_aux_ack(atkbdc_softc_t *kbdc); struct atkbdc_quirks { const char* bios_vendor; const char* maker; const char* product; int quirk; }; static struct atkbdc_quirks quirks[] = { {"coreboot", NULL, NULL, KBDC_QUIRK_KEEP_ACTIVATED | KBDC_QUIRK_IGNORE_PROBE_RESULT | KBDC_QUIRK_RESET_AFTER_PROBE | KBDC_QUIRK_SETLEDS_ON_INIT}, {NULL, NULL, NULL, 0} }; #define QUIRK_STR_MATCH(s1, s2) (s1 == NULL || \ (s2 != NULL && !strcmp(s1, s2))) static int atkbdc_getquirks(void) { int i; char* bios_vendor = kern_getenv("smbios.bios.vendor"); char* maker = kern_getenv("smbios.system.maker"); char* product = kern_getenv("smbios.system.product"); for (i=0; quirks[i].quirk != 0; ++i) if (QUIRK_STR_MATCH(quirks[i].bios_vendor, bios_vendor) && QUIRK_STR_MATCH(quirks[i].maker, maker) && QUIRK_STR_MATCH(quirks[i].product, product)) return (quirks[i].quirk); return (0); } atkbdc_softc_t *atkbdc_get_softc(int unit) { atkbdc_softc_t *sc; if (unit >= nitems(atkbdc_softc)) return NULL; sc = atkbdc_softc[unit]; if (sc == NULL) { sc = atkbdc_softc[unit] = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc == NULL) return NULL; } return sc; } int atkbdc_probe_unit(int unit, struct resource *port0, struct resource *port1) { if (rman_get_start(port0) <= 0) return ENXIO; if (rman_get_start(port1) <= 0) return ENXIO; return 0; } int atkbdc_attach_unit(int unit, atkbdc_softc_t *sc, struct resource *port0, struct resource *port1) { return atkbdc_setup(sc, rman_get_bustag(port0), rman_get_bushandle(port0), rman_get_bushandle(port1)); } /* the backdoor to the keyboard controller! 
XXX */ int atkbdc_configure(void) { bus_space_tag_t tag; bus_space_handle_t h0; bus_space_handle_t h1; #if defined(__i386__) || defined(__amd64__) volatile int i; register_t flags; #endif #ifdef __sparc64__ char name[32]; phandle_t chosen, node; ihandle_t stdin; bus_addr_t port0; bus_addr_t port1; int space; #else int port0; int port1; #endif /* XXX: tag should be passed from the caller */ #if defined(__amd64__) || defined(__i386__) tag = X86_BUS_SPACE_IO; #elif defined(__sparc64__) tag = &atkbdc_bst_store[0]; #else #error "define tag!" #endif #ifdef __sparc64__ if ((chosen = OF_finddevice("/chosen")) == -1) return 0; if (OF_getprop(chosen, "stdin", &stdin, sizeof(stdin)) == -1) return 0; if ((node = OF_instance_to_package(stdin)) == -1) return 0; if (OF_getprop(node, "name", name, sizeof(name)) == -1) return 0; name[sizeof(name) - 1] = '\0'; if (strcmp(name, "kb_ps2") != 0) return 0; /* * The stdin handle points to an instance of a PS/2 keyboard * package but we want the 8042 controller, which is the parent * of that keyboard node. */ if ((node = OF_parent(node)) == 0) return 0; if (OF_decode_addr(node, 0, &space, &port0) != 0) return 0; h0 = sparc64_fake_bustag(space, port0, tag); bus_space_subregion(tag, h0, KBD_DATA_PORT, 1, &h0); if (OF_decode_addr(node, 1, &space, &port1) != 0) return 0; h1 = sparc64_fake_bustag(space, port1, tag); bus_space_subregion(tag, h1, KBD_STATUS_PORT, 1, &h1); #else port0 = IO_KBD; resource_int_value("atkbdc", 0, "port", &port0); port1 = IO_KBD + KBD_STATUS_PORT; #ifdef notyet bus_space_map(tag, port0, IO_KBDSIZE, 0, &h0); bus_space_map(tag, port1, IO_KBDSIZE, 0, &h1); #else h0 = (bus_space_handle_t)port0; h1 = (bus_space_handle_t)port1; #endif #endif #if defined(__i386__) || defined(__amd64__) /* * Check if we really have an AT keyboard controller. Poll status * register until we get "all clear" indication. If no such * indication comes, it probably means that there is no AT * keyboard controller present. Give up in that case. The check relies * on the fact that reading from a non-existing in/out port returns * 0xff on i386. May or may not be true on other platforms. */ flags = intr_disable(); for (i = 0; i != 65535; i++) { if ((bus_space_read_1(tag, h1, 0) & 0x2) == 0) break; } intr_restore(flags); if (i == 65535) return ENXIO; #endif return atkbdc_setup(atkbdc_softc[0], tag, h0, h1); } static int atkbdc_setup(atkbdc_softc_t *sc, bus_space_tag_t tag, bus_space_handle_t h0, bus_space_handle_t h1) { #if defined(__amd64__) u_int64_t tscval[3], read_delay; register_t flags; #endif if (sc->ioh0 == 0) { /* XXX */ sc->command_byte = -1; sc->command_mask = 0; sc->lock = FALSE; sc->kbd.head = sc->kbd.tail = 0; sc->aux.head = sc->aux.tail = 0; + sc->aux_mux_enabled = FALSE; #if KBDIO_DEBUG >= 2 sc->kbd.call_count = 0; sc->kbd.qcount = sc->kbd.max_qcount = 0; sc->aux.call_count = 0; sc->aux.qcount = sc->aux.max_qcount = 0; #endif } sc->iot = tag; sc->ioh0 = h0; sc->ioh1 = h1; #if defined(__amd64__) /* * On certain chipsets the AT keyboard controller isn't present and is * emulated by the BIOS using an SMI interrupt. On those chipsets reading * from the status port may be a thousand times slower than usual. * Sometimes this emulation does not work properly, resulting in * commands timing out, and since we assume that the inb() operation * takes very little time to complete, we need to adjust the number of * retries to keep the waiting time within the designed limit (100ms). * Measure the time it takes to make a read_status() call and adjust * the number of retries accordingly.
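[Editor's note] The measurement below divides the TSC delta for one read_status() by the ticks-per-microsecond estimate obtained from DELAY(1000), then sizes the retry budget so polling stays near the 100 ms limit. A worked sketch of the same arithmetic (the 30 us figure is hypothetical):

/* retry = budget_us / (2 * KBDD_DELAYTIME + per-read cost); with
 * KBDD_DELAYTIME = 7 and a measured 30 us read, 100000 / 44 ~= 2272. */
static int
calibrate_retries(unsigned int read_delay_us)
{
	return (100000 / ((7 * 2) + read_delay_us));
}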
*/ flags = intr_disable(); tscval[0] = rdtsc(); read_status(sc); tscval[1] = rdtsc(); DELAY(1000); tscval[2] = rdtsc(); intr_restore(flags); read_delay = tscval[1] - tscval[0]; read_delay /= (tscval[2] - tscval[1]) / 1000; sc->retry = 100000 / ((KBDD_DELAYTIME * 2) + read_delay); #else sc->retry = 5000; #endif sc->quirks = atkbdc_getquirks(); return 0; } /* open a keyboard controller */ KBDC atkbdc_open(int unit) { if (unit <= 0) unit = 0; if (unit >= MAXKBDC) return NULL; if ((atkbdc_softc[unit]->port0 != NULL) || (atkbdc_softc[unit]->ioh0 != 0)) /* XXX */ return (KBDC)atkbdc_softc[unit]; return NULL; } /* * I/O access arbitration in `kbdio' * * The `kbdio' module uses a simplistic convention to arbitrate * I/O access to the controller/keyboard/mouse. The convention requires * close cooperation of the calling device driver. * * The device drivers which utilize the `kbdio' module are assumed to * have the following set of routines. * a. An interrupt handler (the bottom half of the driver). * b. Timeout routines which may briefly poll the keyboard controller. * c. Routines outside interrupt context (the top half of the driver). * They should follow the rules below: * 1. The interrupt handler may assume that it always has full access * to the controller/keyboard/mouse. * 2. The other routines must issue `spltty()' if they wish to * prevent the interrupt handler from accessing * the controller/keyboard/mouse. * 3. The timeout routines and the top half routines of the device driver * arbitrate I/O access by observing the lock flag in `kbdio'. * The flag is manipulated via `kbdc_lock()'; when one wants to * perform I/O, call `kbdc_lock(kbdc, TRUE)' and proceed only if * the call returns with TRUE. Otherwise the caller must back off. * Call `kbdc_lock(kbdc, FALSE)' when the necessary I/O operation * is finished. This mechanism does not prevent the interrupt * handler from being invoked at any time and carrying out I/O. * Therefore, `spltty()' must be strategically placed in the device * driver code. Also note that the timeout routine may interrupt * `kbdc_lock()' called by the top half of the driver, but this * interruption is OK so long as the timeout routine observes * rule 4 below. * 4. The interrupt and timeout routines should not extend I/O operation * across more than one interrupt or timeout; they must complete any * necessary I/O operation within one invocation of the routine. * This means that if the timeout routine acquires the lock flag, * it must reset the flag to FALSE before it returns.
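[Editor's sketch] Rule 3 above in practice: a top-half routine takes the flag with kbdc_lock(kbdc, TRUE), backs off if that fails, and always clears it before returning. The surrounding driver body is hypothetical; kbdc_lock() itself is the function defined just below:

static int
example_top_half(KBDC kbdc)
{
	if (!kbdc_lock(kbdc, TRUE))
		return (EAGAIN);	/* a transaction is in flight: back off */
	/* ... issue controller/keyboard/mouse commands here ... */
	kbdc_lock(kbdc, FALSE);		/* always release before returning */
	return (0);
}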
*/ /* set/reset polling lock */ int kbdc_lock(KBDC p, int lock) { int prevlock; prevlock = kbdcp(p)->lock; kbdcp(p)->lock = lock; return (prevlock != lock); } /* check if any data is waiting to be processed */ int kbdc_data_ready(KBDC p) { return (availq(&kbdcp(p)->kbd) || availq(&kbdcp(p)->aux) || (read_status(kbdcp(p)) & KBDS_ANY_BUFFER_FULL)); } /* queuing functions */ static int addq(kqueue *q, int c) { if (nextq(q->tail) != q->head) { q->q[q->tail] = c; q->tail = nextq(q->tail); #if KBDIO_DEBUG >= 2 ++q->call_count; ++q->qcount; if (q->qcount > q->max_qcount) q->max_qcount = q->qcount; #endif return TRUE; } return FALSE; } static int removeq(kqueue *q) { int c; if (q->tail != q->head) { c = q->q[q->head]; q->head = nextq(q->head); #if KBDIO_DEBUG >= 2 --q->qcount; #endif return c; } return -1; } /* * device I/O routines */ static int wait_while_controller_busy(struct atkbdc_softc *kbdc) { int retry; int f; /* CPU will stay inside the loop for 100msec at most */ retry = kbdc->retry; while ((f = read_status(kbdc)) & KBDS_INPUT_BUFFER_FULL) { if ((f & KBDS_BUFFER_FULL) == KBDS_KBD_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); addq(&kbdc->kbd, read_data(kbdc)); } else if ((f & KBDS_BUFFER_FULL) == KBDS_AUX_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); addq(&kbdc->aux, read_data(kbdc)); } DELAY(KBDC_DELAYTIME); if (--retry < 0) return FALSE; } return TRUE; } /* * wait for any data; whether it's from the controller, * the keyboard, or the aux device. */ static int wait_for_data(struct atkbdc_softc *kbdc) { int retry; int f; /* CPU will stay inside the loop for 200msec at most */ retry = kbdc->retry * 2; while ((f = read_status(kbdc) & KBDS_ANY_BUFFER_FULL) == 0) { DELAY(KBDC_DELAYTIME); if (--retry < 0) return 0; } DELAY(KBDD_DELAYTIME); return f; } /* wait for data from the keyboard */ static int wait_for_kbd_data(struct atkbdc_softc *kbdc) { int retry; int f; /* CPU will stay inside the loop for 200msec at most */ retry = kbdc->retry * 2; while ((f = read_status(kbdc) & KBDS_BUFFER_FULL) != KBDS_KBD_BUFFER_FULL) { if (f == KBDS_AUX_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); addq(&kbdc->aux, read_data(kbdc)); } DELAY(KBDC_DELAYTIME); if (--retry < 0) return 0; } DELAY(KBDD_DELAYTIME); return f; } /* * wait for an ACK(FAh), RESEND(FEh), or RESET_FAIL(FCh) from the keyboard. * queue anything else. */ static int wait_for_kbd_ack(struct atkbdc_softc *kbdc) { int retry; int f; int b; /* CPU will stay inside the loop for 200msec at most */ retry = kbdc->retry * 2; while (retry-- > 0) { if ((f = read_status(kbdc)) & KBDS_ANY_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); b = read_data(kbdc); if ((f & KBDS_BUFFER_FULL) == KBDS_KBD_BUFFER_FULL) { if ((b == KBD_ACK) || (b == KBD_RESEND) || (b == KBD_RESET_FAIL)) return b; addq(&kbdc->kbd, b); } else if ((f & KBDS_BUFFER_FULL) == KBDS_AUX_BUFFER_FULL) { addq(&kbdc->aux, b); } } DELAY(KBDC_DELAYTIME); } return -1; } /* wait for data from the aux device */ static int wait_for_aux_data(struct atkbdc_softc *kbdc) { int retry; int f; /* CPU will stay inside the loop for 200msec at most */ retry = kbdc->retry * 2; while ((f = read_status(kbdc) & KBDS_BUFFER_FULL) != KBDS_AUX_BUFFER_FULL) { if (f == KBDS_KBD_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); addq(&kbdc->kbd, read_data(kbdc)); } DELAY(KBDC_DELAYTIME); if (--retry < 0) return 0; } DELAY(KBDD_DELAYTIME); return f; } /* * wait for an ACK(FAh), RESEND(FEh), or RESET_FAIL(FCh) from the aux device. * queue anything else. 
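[Editor's note] addq() and removeq() above implement the classic one-slot-reserved ring: the queue is empty when head == tail and full when advancing the tail would land on the head, so only KBDQ_BUFSIZE - 1 of the 32 slots are usable. A self-contained model of the same invariants:

#define QSIZE	32			/* mirrors KBDQ_BUFSIZE */
#define NEXT(i)	(((i) + 1) % QSIZE)	/* mirrors nextq() */

struct ring { int head, tail; unsigned char buf[QSIZE]; };

static int
ring_put(struct ring *r, unsigned char c)
{
	if (NEXT(r->tail) == r->head)
		return (0);		/* full: 31 of 32 slots in use */
	r->buf[r->tail] = c;
	r->tail = NEXT(r->tail);
	return (1);
}

static int
ring_get(struct ring *r)
{
	int c;

	if (r->head == r->tail)
		return (-1);		/* empty */
	c = r->buf[r->head];
	r->head = NEXT(r->head);
	return (c);
}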
*/ static int wait_for_aux_ack(struct atkbdc_softc *kbdc) { int retry; int f; int b; /* CPU will stay inside the loop for 200msec at most */ retry = kbdc->retry * 2; while (retry-- > 0) { if ((f = read_status(kbdc)) & KBDS_ANY_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); b = read_data(kbdc); if ((f & KBDS_BUFFER_FULL) == KBDS_AUX_BUFFER_FULL) { if ((b == PSM_ACK) || (b == PSM_RESEND) || (b == PSM_RESET_FAIL)) return b; addq(&kbdc->aux, b); } else if ((f & KBDS_BUFFER_FULL) == KBDS_KBD_BUFFER_FULL) { addq(&kbdc->kbd, b); } } DELAY(KBDC_DELAYTIME); } return -1; } /* write a one byte command to the controller */ int write_controller_command(KBDC p, int c) { if (!wait_while_controller_busy(kbdcp(p))) return FALSE; write_command(kbdcp(p), c); return TRUE; } /* write a one byte data to the controller */ int write_controller_data(KBDC p, int c) { if (!wait_while_controller_busy(kbdcp(p))) return FALSE; write_data(kbdcp(p), c); return TRUE; } /* write a one byte keyboard command */ int write_kbd_command(KBDC p, int c) { if (!wait_while_controller_busy(kbdcp(p))) return FALSE; write_data(kbdcp(p), c); return TRUE; } /* write a one byte auxiliary device command */ int write_aux_command(KBDC p, int c) { - if (!write_controller_command(p, KBDC_WRITE_TO_AUX)) + int f; + + f = aux_mux_is_enabled(p) ? + KBDC_WRITE_TO_AUX_MUX + kbdcp(p)->aux_mux_port : KBDC_WRITE_TO_AUX; + + if (!write_controller_command(p, f)) return FALSE; return write_controller_data(p, c); } /* send a command to the keyboard and wait for ACK */ int send_kbd_command(KBDC p, int c) { int retry = KBD_MAXRETRY; int res = -1; while (retry-- > 0) { if (!write_kbd_command(p, c)) continue; res = wait_for_kbd_ack(kbdcp(p)); if (res == KBD_ACK) break; } return res; } /* send a command to the auxiliary device and wait for ACK */ int send_aux_command(KBDC p, int c) { int retry = KBD_MAXRETRY; int res = -1; while (retry-- > 0) { if (!write_aux_command(p, c)) continue; /* * FIXME: XXX * The aux device may have already sent one or two bytes of * status data, when a command is received. It will immediately * stop data transmission, thus, leaving an incomplete data * packet in our buffer. We have to discard any unprocessed * data in order to remove such packets. Well, we may remove * unprocessed, but necessary data byte as well... 
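[Editor's note] The send_*_command routines in this file all follow the same PS/2 per-byte handshake: the device answers each byte with ACK (0xFA), asks for a retransmit with RESEND (0xFE), and anything else aborts the exchange. Distilled into one hedged helper (a restatement of the existing pattern, not a new driver API):

static int
put_kbd_byte_acked(KBDC p, int byte)
{
	int res, retry;

	for (retry = KBD_MAXRETRY; retry > 0; --retry) {
		if (!write_kbd_command(p, byte))
			continue;		/* controller busy: try again */
		res = wait_for_kbd_ack(kbdcp(p));
		if (res == KBD_ACK)		/* 0xFA: byte accepted */
			return (TRUE);
		if (res != KBD_RESEND)		/* not 0xFE: give up */
			break;
	}
	return (FALSE);
}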
*/ emptyq(&kbdcp(p)->aux); res = wait_for_aux_ack(kbdcp(p)); if (res == PSM_ACK) break; } return res; } /* send a command and a data to the keyboard, wait for ACKs */ int send_kbd_command_and_data(KBDC p, int c, int d) { int retry; int res = -1; for (retry = KBD_MAXRETRY; retry > 0; --retry) { if (!write_kbd_command(p, c)) continue; res = wait_for_kbd_ack(kbdcp(p)); if (res == KBD_ACK) break; else if (res != KBD_RESEND) return res; } if (retry <= 0) return res; for (retry = KBD_MAXRETRY, res = -1; retry > 0; --retry) { if (!write_kbd_command(p, d)) continue; res = wait_for_kbd_ack(kbdcp(p)); if (res != KBD_RESEND) break; } return res; } /* send a command and a data to the auxiliary device, wait for ACKs */ int send_aux_command_and_data(KBDC p, int c, int d) { int retry; int res = -1; for (retry = KBD_MAXRETRY; retry > 0; --retry) { if (!write_aux_command(p, c)) continue; emptyq(&kbdcp(p)->aux); res = wait_for_aux_ack(kbdcp(p)); if (res == PSM_ACK) break; else if (res != PSM_RESEND) return res; } if (retry <= 0) return res; for (retry = KBD_MAXRETRY, res = -1; retry > 0; --retry) { if (!write_aux_command(p, d)) continue; res = wait_for_aux_ack(kbdcp(p)); if (res != PSM_RESEND) break; } return res; } /* * read one byte from any source; whether from the controller, * the keyboard, or the aux device */ int read_controller_data(KBDC p) { if (availq(&kbdcp(p)->kbd)) return removeq(&kbdcp(p)->kbd); if (availq(&kbdcp(p)->aux)) return removeq(&kbdcp(p)->aux); if (!wait_for_data(kbdcp(p))) return -1; /* timeout */ return read_data(kbdcp(p)); } #if KBDIO_DEBUG >= 2 static int call = 0; #endif /* read one byte from the keyboard */ int read_kbd_data(KBDC p) { #if KBDIO_DEBUG >= 2 if (++call > 2000) { call = 0; log(LOG_DEBUG, "kbdc: kbd q: %d calls, max %d chars, " "aux q: %d calls, max %d chars\n", kbdcp(p)->kbd.call_count, kbdcp(p)->kbd.max_qcount, kbdcp(p)->aux.call_count, kbdcp(p)->aux.max_qcount); } #endif if (availq(&kbdcp(p)->kbd)) return removeq(&kbdcp(p)->kbd); if (!wait_for_kbd_data(kbdcp(p))) return -1; /* timeout */ return read_data(kbdcp(p)); } /* read one byte from the keyboard, but return immediately if * no data is waiting */ int read_kbd_data_no_wait(KBDC p) { int f; #if KBDIO_DEBUG >= 2 if (++call > 2000) { call = 0; log(LOG_DEBUG, "kbdc: kbd q: %d calls, max %d chars, " "aux q: %d calls, max %d chars\n", kbdcp(p)->kbd.call_count, kbdcp(p)->kbd.max_qcount, kbdcp(p)->aux.call_count, kbdcp(p)->aux.max_qcount); } #endif if (availq(&kbdcp(p)->kbd)) return removeq(&kbdcp(p)->kbd); f = read_status(kbdcp(p)) & KBDS_BUFFER_FULL; if (f == KBDS_AUX_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); addq(&kbdcp(p)->aux, read_data(kbdcp(p))); f = read_status(kbdcp(p)) & KBDS_BUFFER_FULL; } if (f == KBDS_KBD_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); return read_data(kbdcp(p)); } return -1; /* no data */ } /* read one byte from the aux device */ int read_aux_data(KBDC p) { if (availq(&kbdcp(p)->aux)) return removeq(&kbdcp(p)->aux); if (!wait_for_aux_data(kbdcp(p))) return -1; /* timeout */ return read_data(kbdcp(p)); } /* read one byte from the aux device, but return immediately if * no data is waiting */ int read_aux_data_no_wait(KBDC p) { int f; if (availq(&kbdcp(p)->aux)) return removeq(&kbdcp(p)->aux); f = read_status(kbdcp(p)) & KBDS_BUFFER_FULL; if (f == KBDS_KBD_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); addq(&kbdcp(p)->kbd, read_data(kbdcp(p))); f = read_status(kbdcp(p)) & KBDS_BUFFER_FULL; } if (f == KBDS_AUX_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); return read_data(kbdcp(p)); } return -1; /* no data */ } /* 
discard data from the keyboard */ void empty_kbd_buffer(KBDC p, int wait) { int t; int b; int f; #if KBDIO_DEBUG >= 2 int c1 = 0; int c2 = 0; #endif int delta = 2; for (t = wait; t > 0; ) { if ((f = read_status(kbdcp(p))) & KBDS_ANY_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); b = read_data(kbdcp(p)); if ((f & KBDS_BUFFER_FULL) == KBDS_AUX_BUFFER_FULL) { addq(&kbdcp(p)->aux, b); #if KBDIO_DEBUG >= 2 ++c2; } else { ++c1; #endif } t = wait; } else { t -= delta; } DELAY(delta*1000); } #if KBDIO_DEBUG >= 2 if ((c1 > 0) || (c2 > 0)) log(LOG_DEBUG, "kbdc: %d:%d char read (empty_kbd_buffer)\n", c1, c2); #endif emptyq(&kbdcp(p)->kbd); } /* discard data from the aux device */ void empty_aux_buffer(KBDC p, int wait) { int t; int b; int f; #if KBDIO_DEBUG >= 2 int c1 = 0; int c2 = 0; #endif int delta = 2; for (t = wait; t > 0; ) { if ((f = read_status(kbdcp(p))) & KBDS_ANY_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); b = read_data(kbdcp(p)); if ((f & KBDS_BUFFER_FULL) == KBDS_KBD_BUFFER_FULL) { addq(&kbdcp(p)->kbd, b); #if KBDIO_DEBUG >= 2 ++c1; } else { ++c2; #endif } t = wait; } else { t -= delta; } DELAY(delta*1000); } #if KBDIO_DEBUG >= 2 if ((c1 > 0) || (c2 > 0)) log(LOG_DEBUG, "kbdc: %d:%d char read (empty_aux_buffer)\n", c1, c2); #endif emptyq(&kbdcp(p)->aux); } /* discard any data from the keyboard or the aux device */ void empty_both_buffers(KBDC p, int wait) { int t; int f; int waited = 0; #if KBDIO_DEBUG >= 2 int c1 = 0; int c2 = 0; #endif int delta = 2; for (t = wait; t > 0; ) { if ((f = read_status(kbdcp(p))) & KBDS_ANY_BUFFER_FULL) { DELAY(KBDD_DELAYTIME); (void)read_data(kbdcp(p)); #if KBDIO_DEBUG >= 2 if ((f & KBDS_BUFFER_FULL) == KBDS_KBD_BUFFER_FULL) ++c1; else ++c2; #endif t = wait; } else { t -= delta; } /* * Some systems (Intel/IBM blades) do not have keyboard devices and * will thus hang in this procedure. Time out after delta seconds to * avoid this hang -- the keyboard attach will fail later on. */ waited += (delta * 1000); if (waited == (delta * 1000000)) return; DELAY(delta*1000); } #if KBDIO_DEBUG >= 2 if ((c1 > 0) || (c2 > 0)) log(LOG_DEBUG, "kbdc: %d:%d char read (empty_both_buffers)\n", c1, c2); #endif emptyq(&kbdcp(p)->kbd); emptyq(&kbdcp(p)->aux); } /* keyboard and mouse device control */ /* NOTE: enable the keyboard port but disable the keyboard * interrupt before calling "reset_kbd()". */ int reset_kbd(KBDC p) { int retry = KBD_MAXRETRY; int again = KBD_MAXWAIT; int c = KBD_RESEND; /* keep the compiler happy */ while (retry-- > 0) { empty_both_buffers(p, 10); if (!write_kbd_command(p, KBDC_RESET_KBD)) continue; emptyq(&kbdcp(p)->kbd); c = read_controller_data(p); if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: RESET_KBD return code:%04x\n", c); if (c == KBD_ACK) /* keyboard has agreed to reset itself... */ break; } if (retry < 0) return FALSE; while (again-- > 0) { /* wait awhile, well, in fact we must wait quite loooooooooooong */ DELAY(KBD_RESETDELAY*1000); c = read_controller_data(p); /* RESET_DONE/RESET_FAIL */ if (c != -1) /* wait again if the controller is not ready */ break; } if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: RESET_KBD status:%04x\n", c); if (c != KBD_RESET_DONE) return FALSE; return TRUE; } /* NOTE: enable the aux port but disable the aux interrupt * before calling `reset_aux_dev()'. 
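[Editor's sketch] Honoring the NOTE above before probing a pointing device; the mask/value macros are the ones from atkbdcreg.h, where the enable bits are zero, so this leaves the aux port on with its interrupt off (the caller context is hypothetical):

	if (!set_controller_command_byte(kbdc, KBD_AUX_CONTROL_BITS,
	    KBD_ENABLE_AUX_PORT | KBD_DISABLE_AUX_INT))
		return (FALSE);		/* controller did not accept it */
	if (!reset_aux_dev(kbdc))
		return (FALSE);		/* no aux device, or reset failed */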
*/ int reset_aux_dev(KBDC p) { int retry = KBD_MAXRETRY; int again = KBD_MAXWAIT; int c = PSM_RESEND; /* keep the compiler happy */ while (retry-- > 0) { empty_both_buffers(p, 10); if (!write_aux_command(p, PSMC_RESET_DEV)) continue; emptyq(&kbdcp(p)->aux); /* NOTE: Compaq Armada laptops require extra delay here. XXX */ for (again = KBD_MAXWAIT; again > 0; --again) { DELAY(KBD_RESETDELAY*1000); c = read_aux_data_no_wait(p); if (c != -1) break; } if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: RESET_AUX return code:%04x\n", c); if (c == PSM_ACK) /* aux dev is about to reset... */ break; } if (retry < 0) return FALSE; for (again = KBD_MAXWAIT; again > 0; --again) { /* wait awhile, well, quite looooooooooooong */ DELAY(KBD_RESETDELAY*1000); c = read_aux_data_no_wait(p); /* RESET_DONE/RESET_FAIL */ if (c != -1) /* wait again if the controller is not ready */ break; } if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: RESET_AUX status:%04x\n", c); if (c != PSM_RESET_DONE) /* reset status */ return FALSE; c = read_aux_data(p); /* device ID */ if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: RESET_AUX ID:%04x\n", c); /* NOTE: we could check the device ID now, but leave it later... */ return TRUE; } /* controller diagnostics and setup */ int test_controller(KBDC p) { int retry = KBD_MAXRETRY; int again = KBD_MAXWAIT; int c = KBD_DIAG_FAIL; while (retry-- > 0) { empty_both_buffers(p, 10); if (write_controller_command(p, KBDC_DIAGNOSE)) break; } if (retry < 0) return FALSE; emptyq(&kbdcp(p)->kbd); while (again-- > 0) { /* wait awhile */ DELAY(KBD_RESETDELAY*1000); c = read_controller_data(p); /* DIAG_DONE/DIAG_FAIL */ if (c != -1) /* wait again if the controller is not ready */ break; } if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: DIAGNOSE status:%04x\n", c); return (c == KBD_DIAG_DONE); } int test_kbd_port(KBDC p) { int retry = KBD_MAXRETRY; int again = KBD_MAXWAIT; int c = -1; while (retry-- > 0) { empty_both_buffers(p, 10); if (write_controller_command(p, KBDC_TEST_KBD_PORT)) break; } if (retry < 0) return FALSE; emptyq(&kbdcp(p)->kbd); while (again-- > 0) { c = read_controller_data(p); if (c != -1) /* try again if the controller is not ready */ break; } if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: TEST_KBD_PORT status:%04x\n", c); return c; } int test_aux_port(KBDC p) { int retry = KBD_MAXRETRY; int again = KBD_MAXWAIT; int c = -1; while (retry-- > 0) { empty_both_buffers(p, 10); if (write_controller_command(p, KBDC_TEST_AUX_PORT)) break; } if (retry < 0) return FALSE; emptyq(&kbdcp(p)->kbd); while (again-- > 0) { c = read_controller_data(p); if (c != -1) /* try again if the controller is not ready */ break; } if (verbose || bootverbose) log(LOG_DEBUG, "kbdc: TEST_AUX_PORT status:%04x\n", c); return c; } int kbdc_get_device_mask(KBDC p) { return kbdcp(p)->command_mask; } void kbdc_set_device_mask(KBDC p, int mask) { kbdcp(p)->command_mask = mask & (((kbdcp(p)->quirks & KBDC_QUIRK_KEEP_ACTIVATED) ? 
0 : KBD_KBD_CONTROL_BITS) | KBD_AUX_CONTROL_BITS); } int get_controller_command_byte(KBDC p) { if (kbdcp(p)->command_byte != -1) return kbdcp(p)->command_byte; if (!write_controller_command(p, KBDC_GET_COMMAND_BYTE)) return -1; emptyq(&kbdcp(p)->kbd); kbdcp(p)->command_byte = read_controller_data(p); return kbdcp(p)->command_byte; } int set_controller_command_byte(KBDC p, int mask, int command) { if (get_controller_command_byte(p) == -1) return FALSE; command = (kbdcp(p)->command_byte & ~mask) | (command & mask); if (command & KBD_DISABLE_KBD_PORT) { if (!write_controller_command(p, KBDC_DISABLE_KBD_PORT)) return FALSE; } if (!write_controller_command(p, KBDC_SET_COMMAND_BYTE)) return FALSE; if (!write_controller_data(p, command)) return FALSE; kbdcp(p)->command_byte = command; if (verbose) log(LOG_DEBUG, "kbdc: new command byte:%04x (set_controller...)\n", command); return TRUE; +} + +/* + * Rudimentary support for active PS/2 AUX port multiplexing. + * Only write commands can be routed to a selected AUX port. + * Source port of data processed by read commands is totally ignored. + */ +static int +set_aux_mux_state(KBDC p, int enabled) +{ + int command, version; + + if (write_controller_command(p, KBDC_FORCE_AUX_OUTPUT) == 0 || + write_controller_data(p, 0xF0) == 0 || + read_controller_data(p) != 0xF0) + return (-1); + + if (write_controller_command(p, KBDC_FORCE_AUX_OUTPUT) == 0 || + write_controller_data(p, 0x56) == 0 || + read_controller_data(p) != 0x56) + return (-1); + + command = enabled ? 0xa4 : 0xa5; + if (write_controller_command(p, KBDC_FORCE_AUX_OUTPUT) == 0 || + write_controller_data(p, command) == 0 || + (version = read_controller_data(p)) == command) + return (-1); + + return (version); +} + +int +set_active_aux_mux_port(KBDC p, int port) +{ + + if (!aux_mux_is_enabled(p)) + return (FALSE); + + if (port < 0 || port >= KBDC_AUX_MUX_NUM_PORTS) + return (FALSE); + + kbdcp(p)->aux_mux_port = port; + + return (TRUE); +} + +/* Checks for active multiplexing support and enables it */ +int +enable_aux_mux(KBDC p) +{ + int version; + + version = set_aux_mux_state(p, TRUE); + if (version >= 0) { + kbdcp(p)->aux_mux_enabled = TRUE; + set_active_aux_mux_port(p, 0); + } + + return (version); +} + +int +disable_aux_mux(KBDC p) +{ + + kbdcp(p)->aux_mux_enabled = FALSE; + + return (set_aux_mux_state(p, FALSE)); +} + +int +aux_mux_is_enabled(KBDC p) +{ + + return (kbdcp(p)->aux_mux_enabled); } Index: projects/clang700-import/sys/dev/atkbdc/atkbdcreg.h =================================================================== --- projects/clang700-import/sys/dev/atkbdc/atkbdcreg.h (revision 340917) +++ projects/clang700-import/sys/dev/atkbdc/atkbdcreg.h (revision 340918) @@ -1,273 +1,284 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1996-1999 * Kazutaka YOKOTA (yokota@zodiac.mech.utsunomiya-u.ac.jp) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior written * permission. 
 * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * from kbdio.h,v 1.8 1998/09/25 11:55:46 yokota Exp */ #ifndef _DEV_ATKBDC_ATKBDCREG_H_ #define _DEV_ATKBDC_ATKBDCREG_H_ #include "opt_kbd.h" /* Structures depend on the value of KBDIO_DEBUG */ /* constants */ /* I/O ports */ #define KBD_STATUS_PORT 4 /* status port, read */ #define KBD_COMMAND_PORT 4 /* controller command port, write */ #define KBD_DATA_PORT 0 /* data port, read/write * also used as keyboard command * and mouse command port */ /* controller commands (sent to KBD_COMMAND_PORT) */ #define KBDC_SET_COMMAND_BYTE 0x0060 #define KBDC_GET_COMMAND_BYTE 0x0020 +#define KBDC_WRITE_TO_AUX_MUX 0x0090 +#define KBDC_FORCE_AUX_OUTPUT 0x00d3 #define KBDC_WRITE_TO_AUX 0x00d4 #define KBDC_DISABLE_AUX_PORT 0x00a7 #define KBDC_ENABLE_AUX_PORT 0x00a8 #define KBDC_TEST_AUX_PORT 0x00a9 #define KBDC_DIAGNOSE 0x00aa #define KBDC_TEST_KBD_PORT 0x00ab #define KBDC_DISABLE_KBD_PORT 0x00ad #define KBDC_ENABLE_KBD_PORT 0x00ae /* controller command byte (set by KBDC_SET_COMMAND_BYTE) */ #define KBD_TRANSLATION 0x0040 #define KBD_RESERVED_BITS 0x0004 #define KBD_OVERRIDE_KBD_LOCK 0x0008 #define KBD_ENABLE_KBD_PORT 0x0000 #define KBD_DISABLE_KBD_PORT 0x0010 #define KBD_ENABLE_AUX_PORT 0x0000 #define KBD_DISABLE_AUX_PORT 0x0020 #define KBD_ENABLE_AUX_INT 0x0002 #define KBD_DISABLE_AUX_INT 0x0000 #define KBD_ENABLE_KBD_INT 0x0001 #define KBD_DISABLE_KBD_INT 0x0000 #define KBD_KBD_CONTROL_BITS (KBD_DISABLE_KBD_PORT | KBD_ENABLE_KBD_INT) #define KBD_AUX_CONTROL_BITS (KBD_DISABLE_AUX_PORT | KBD_ENABLE_AUX_INT) /* keyboard device commands (sent to KBD_DATA_PORT) */ #define KBDC_RESET_KBD 0x00ff #define KBDC_ENABLE_KBD 0x00f4 #define KBDC_DISABLE_KBD 0x00f5 #define KBDC_SET_DEFAULTS 0x00f6 #define KBDC_SEND_DEV_ID 0x00f2 #define KBDC_SET_LEDS 0x00ed #define KBDC_ECHO 0x00ee #define KBDC_SET_SCANCODE_SET 0x00f0 #define KBDC_SET_TYPEMATIC 0x00f3 /* aux device commands (sent to KBD_DATA_PORT) */ #define PSMC_RESET_DEV 0x00ff #define PSMC_ENABLE_DEV 0x00f4 #define PSMC_DISABLE_DEV 0x00f5 #define PSMC_SET_DEFAULTS 0x00f6 #define PSMC_SEND_DEV_ID 0x00f2 #define PSMC_SEND_DEV_STATUS 0x00e9 #define PSMC_SEND_DEV_DATA 0x00eb #define PSMC_SET_SCALING11 0x00e6 #define PSMC_SET_SCALING21 0x00e7 #define PSMC_SET_RESOLUTION 0x00e8 #define PSMC_SET_STREAM_MODE 0x00ea #define PSMC_SET_REMOTE_MODE 0x00f0 #define PSMC_SET_SAMPLING_RATE 0x00f3 /* PSMC_SET_RESOLUTION argument */ #define PSMD_RES_LOW 0 /* typically 25ppi */ #define PSMD_RES_MEDIUM_LOW 1 /* typically 50ppi */ #define PSMD_RES_MEDIUM_HIGH 2 /* typically 100ppi (default) */ #define PSMD_RES_HIGH 3 /* typically 200ppi */ #define PSMD_MAX_RESOLUTION PSMD_RES_HIGH /* PSMC_SET_SAMPLING_RATE */ #define PSMD_MAX_RATE 255 /* FIXME: not sure if it's possible */ /* status bits (KBD_STATUS_PORT) */ #define 
KBDS_BUFFER_FULL 0x0021 #define KBDS_ANY_BUFFER_FULL 0x0001 #define KBDS_KBD_BUFFER_FULL 0x0001 #define KBDS_AUX_BUFFER_FULL 0x0021 #define KBDS_INPUT_BUFFER_FULL 0x0002 /* return code */ #define KBD_ACK 0x00fa #define KBD_RESEND 0x00fe #define KBD_RESET_DONE 0x00aa #define KBD_RESET_FAIL 0x00fc #define KBD_DIAG_DONE 0x0055 #define KBD_DIAG_FAIL 0x00fd #define KBD_ECHO 0x00ee #define PSM_ACK 0x00fa #define PSM_RESEND 0x00fe #define PSM_RESET_DONE 0x00aa #define PSM_RESET_FAIL 0x00fc /* aux device ID */ #define PSM_MOUSE_ID 0 #define PSM_BALLPOINT_ID 2 #define PSM_INTELLI_ID 3 #define PSM_EXPLORER_ID 4 #define PSM_4DMOUSE_ID 6 #define PSM_4DPLUS_ID 8 #define PSM_4DPLUS_RFSW35_ID 24 #ifdef _KERNEL #define ATKBDC_DRIVER_NAME "atkbdc" /* * driver specific options: the following options may be set by * `options' statements in the kernel configuration file. */ /* retry count */ #ifndef KBD_MAXRETRY #define KBD_MAXRETRY 3 #endif /* timing parameters */ #ifndef KBD_RESETDELAY #define KBD_RESETDELAY 200 /* wait 200msec after kbd/mouse reset */ #endif #ifndef KBD_MAXWAIT #define KBD_MAXWAIT 5 /* wait 5 times at most after reset */ #endif /* I/O recovery time */ #define KBDC_DELAYTIME 20 #define KBDD_DELAYTIME 7 /* debug option */ #ifndef KBDIO_DEBUG #define KBDIO_DEBUG 0 #endif /* end of driver specific options */ /* types/structures */ #define KBDQ_BUFSIZE 32 typedef struct _kqueue { int head; int tail; unsigned char q[KBDQ_BUFSIZE]; #if KBDIO_DEBUG >= 2 int call_count; int qcount; int max_qcount; #endif } kqueue; struct resource; typedef struct atkbdc_softc { struct resource *port0; /* data port */ struct resource *port1; /* status port */ struct resource *irq; bus_space_tag_t iot; bus_space_handle_t ioh0; bus_space_handle_t ioh1; int command_byte; /* current command byte value */ int command_mask; /* command byte mask bits for kbd/aux devices */ int lock; /* FIXME: XXX not quite a semaphore... 
*/ kqueue kbd; /* keyboard data queue */ kqueue aux; /* auxiliary data queue */ int retry; int quirks; /* controller doesn't like deactivate */ #define KBDC_QUIRK_KEEP_ACTIVATED (1 << 0) #define KBDC_QUIRK_IGNORE_PROBE_RESULT (1 << 1) #define KBDC_QUIRK_RESET_AFTER_PROBE (1 << 2) #define KBDC_QUIRK_SETLEDS_ON_INIT (1 << 3) + int aux_mux_enabled; /* active PS/2 multiplexing is enabled */ + int aux_mux_port; /* current aux mux port */ } atkbdc_softc_t; enum kbdc_device_ivar { KBDC_IVAR_VENDORID, KBDC_IVAR_SERIAL, KBDC_IVAR_LOGICALID, KBDC_IVAR_COMPATID, }; typedef caddr_t KBDC; #define KBDC_RID_KBD 0 #define KBDC_RID_AUX 1 +#define KBDC_AUX_MUX_NUM_PORTS 4 + /* function prototypes */ atkbdc_softc_t *atkbdc_get_softc(int unit); int atkbdc_probe_unit(int unit, struct resource *port0, struct resource *port1); int atkbdc_attach_unit(int unit, atkbdc_softc_t *sc, struct resource *port0, struct resource *port1); int atkbdc_configure(void); KBDC atkbdc_open(int unit); int kbdc_lock(KBDC kbdc, int lock); int kbdc_data_ready(KBDC kbdc); int write_controller_command(KBDC kbdc,int c); int write_controller_data(KBDC kbdc,int c); int write_kbd_command(KBDC kbdc,int c); int write_aux_command(KBDC kbdc,int c); int send_kbd_command(KBDC kbdc,int c); int send_aux_command(KBDC kbdc,int c); int send_kbd_command_and_data(KBDC kbdc,int c,int d); int send_aux_command_and_data(KBDC kbdc,int c,int d); int read_controller_data(KBDC kbdc); int read_kbd_data(KBDC kbdc); int read_kbd_data_no_wait(KBDC kbdc); int read_aux_data(KBDC kbdc); int read_aux_data_no_wait(KBDC kbdc); void empty_kbd_buffer(KBDC kbdc, int t); void empty_aux_buffer(KBDC kbdc, int t); void empty_both_buffers(KBDC kbdc, int t); int reset_kbd(KBDC kbdc); int reset_aux_dev(KBDC kbdc); int test_controller(KBDC kbdc); int test_kbd_port(KBDC kbdc); int test_aux_port(KBDC kbdc); int kbdc_get_device_mask(KBDC kbdc); void kbdc_set_device_mask(KBDC kbdc, int mask); int get_controller_command_byte(KBDC kbdc); int set_controller_command_byte(KBDC kbdc, int command, int flag); + +int set_active_aux_mux_port(KBDC p, int port); +int enable_aux_mux(KBDC p); +int disable_aux_mux(KBDC p); +int aux_mux_is_enabled(KBDC p); #endif /* _KERNEL */ #endif /* !_DEV_ATKBDC_ATKBDCREG_H_ */ Index: projects/clang700-import/sys/dev/atkbdc/psm.c =================================================================== --- projects/clang700-import/sys/dev/atkbdc/psm.c (revision 340917) +++ projects/clang700-import/sys/dev/atkbdc/psm.c (revision 340918) @@ -1,7298 +1,7423 @@ /*- * Copyright (c) 1992, 1993 Erik Forsberg. * Copyright (c) 1996, 1997 Kazutaka YOKOTA. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN * NO EVENT SHALL I BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Ported to 386bsd Oct 17, 1992 * Sandi Donno, Computer Science, University of Cape Town, South Africa * Please send bug reports to sandi@cs.uct.ac.za * * Thanks are also due to Rick Macklem, rick@snowhite.cis.uoguelph.ca - * although I was only partially successful in getting the alpha release * of his "driver for the Logitech and ATI Inport Bus mice for use with * 386bsd and the X386 port" to work with my Microsoft mouse, I nevertheless * found his code to be an invaluable reference when porting this driver * to 386bsd. * * Further modifications for latest 386BSD+patchkit and port to NetBSD, * Andrew Herbert - 8 June 1993 * * Cloned from the Microsoft Bus Mouse driver, also by Erik Forsberg, by * Andrew Herbert - 12 June 1993 * * Modified for PS/2 mouse by Charles Hannum * - 13 June 1993 * * Modified for PS/2 AUX mouse by Shoji Yuen * - 24 October 1993 * * Hardware access routines and probe logic rewritten by * Kazutaka Yokota * - 3, 14, 22 October 1996. * - 12 November 1996. IOCTLs and rearranging `psmread', `psmioctl'... * - 14, 30 November 1996. Uses `kbdio.c'. * - 13 December 1996. Uses queuing version of `kbdio.c'. * - January/February 1997. Tweaked probe logic for * HiNote UltraII/Latitude/Armada laptops. * - 30 July 1997. Added APM support. * - 5 March 1997. Defined driver configuration flags (PSM_CONFIG_XXX). * Improved sync check logic. * Vendor specific support routines. */ #include __FBSDID("$FreeBSD$"); #include "opt_isa.h" #include "opt_psm.h" #include "opt_evdev.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_ISA #include #endif #ifdef EVDEV_SUPPORT #include #include #endif #include #include /* * Driver specific options: the following options may be set by * `options' statements in the kernel configuration file. 
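[Editor's note] An example of the `options' statements the comment above refers to, as they would appear in a kernel configuration file (values illustrative; the option names are the ones tested with #ifndef in this file and in atkbdcreg.h):

options 	PSM_DEBUG=2	# verbose psm event logging
options 	KBD_MAXRETRY=5	# extra command retries on flaky controllers
options 	KBDIO_DEBUG=2	# collect queue statistics in kbdio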
*/ /* debugging */ #ifndef PSM_DEBUG #define PSM_DEBUG 0 /* * logging: 0: none, 1: brief, 2: verbose * 3: sync errors, 4: all packets */ #endif #define VLOG(level, args) do { \ if (verbose >= level) \ log args; \ } while (0) #ifndef PSM_INPUT_TIMEOUT #define PSM_INPUT_TIMEOUT 2000000 /* 2 sec */ #endif #ifndef PSM_TAP_TIMEOUT #define PSM_TAP_TIMEOUT 125000 #endif #ifndef PSM_TAP_THRESHOLD #define PSM_TAP_THRESHOLD 25 #endif /* end of driver specific options */ #define PSMCPNP_DRIVER_NAME "psmcpnp" struct psmcpnp_softc { enum { PSMCPNP_GENERIC, PSMCPNP_FORCEPAD, - PSMCPNP_HPSYN81, } type; /* Based on PnP ID */ }; /* input queue */ #define PSM_BUFSIZE 960 #define PSM_SMALLBUFSIZE 240 /* operation levels */ #define PSM_LEVEL_BASE 0 #define PSM_LEVEL_STANDARD 1 #define PSM_LEVEL_NATIVE 2 #define PSM_LEVEL_MIN PSM_LEVEL_BASE #define PSM_LEVEL_MAX PSM_LEVEL_NATIVE +/* Active PS/2 multiplexing */ +#define PSM_NOMUX (-1) + /* Logitech PS2++ protocol */ #define MOUSE_PS2PLUS_CHECKBITS(b) \ ((((b[2] & 0x03) << 2) | 0x02) == (b[1] & 0x0f)) #define MOUSE_PS2PLUS_PACKET_TYPE(b) \ (((b[0] & 0x30) >> 2) | ((b[1] & 0x30) >> 4)) /* ring buffer */ typedef struct ringbuf { int count; /* # of valid elements in the buffer */ int head; /* head pointer */ int tail; /* tail pointer */ u_char buf[PSM_BUFSIZE]; } ringbuf_t; /* data buffer */ typedef struct packetbuf { u_char ipacket[16]; /* interim input buffer */ int inputbytes; /* # of bytes in the input buffer */ } packetbuf_t; #ifndef PSM_PACKETQUEUE #define PSM_PACKETQUEUE 128 #endif -/* - * Typical bezel limits. Taken from 'Synaptics - * PS/2 TouchPad Interfacing Guide' p.3.2.3. - */ -#define SYNAPTICS_DEFAULT_MAX_X 5472 -#define SYNAPTICS_DEFAULT_MAX_Y 4448 -#define SYNAPTICS_DEFAULT_MIN_X 1472 -#define SYNAPTICS_DEFAULT_MIN_Y 1408 - typedef struct synapticsinfo { struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; int directional_scrolls; int two_finger_scroll; int min_pressure; int max_pressure; int max_width; int margin_top; int margin_right; int margin_bottom; int margin_left; int na_top; int na_right; int na_bottom; int na_left; int window_min; int window_max; int multiplicator; int weight_current; int weight_previous; int weight_previous_na; int weight_len_squared; int div_min; int div_max; int div_max_na; int div_len; int tap_max_delta; int tap_min_queue; int taphold_timeout; int vscroll_ver_area; int vscroll_hor_area; int vscroll_min_delta; int vscroll_div_min; int vscroll_div_max; int touchpad_off; int softbuttons_y; int softbutton2_x; int softbutton3_x; int max_x; int max_y; } synapticsinfo_t; typedef struct synapticspacket { int x; int y; } synapticspacket_t; #define SYNAPTICS_PACKETQUEUE 10 #define SYNAPTICS_QUEUE_CURSOR(x) \ (x + SYNAPTICS_PACKETQUEUE) % SYNAPTICS_PACKETQUEUE #define SYNAPTICS_VERSION_GE(synhw, major, minor) \ ((synhw).infoMajor > (major) || \ ((synhw).infoMajor == (major) && (synhw).infoMinor >= (minor))) typedef struct smoother { synapticspacket_t queue[SYNAPTICS_PACKETQUEUE]; int queue_len; int queue_cursor; int start_x; int start_y; int avg_dx; int avg_dy; int squelch_x; int squelch_y; int is_fuzzy; int active; } smoother_t; typedef struct gesture { int window_min; int fingers_nb; int tap_button; int in_taphold; int in_vscroll; int zmax; /* maximum pressure value */ struct timeval taptimeout; /* tap timeout for touchpads */ } gesture_t; enum { TRACKPOINT_SYSCTL_SENSITIVITY, TRACKPOINT_SYSCTL_NEGATIVE_INERTIA, TRACKPOINT_SYSCTL_UPPER_PLATEAU, TRACKPOINT_SYSCTL_BACKUP_RANGE, TRACKPOINT_SYSCTL_DRAG_HYSTERESIS, 
TRACKPOINT_SYSCTL_MINIMUM_DRAG, TRACKPOINT_SYSCTL_UP_THRESHOLD, TRACKPOINT_SYSCTL_THRESHOLD, TRACKPOINT_SYSCTL_JENKS_CURVATURE, TRACKPOINT_SYSCTL_Z_TIME, TRACKPOINT_SYSCTL_PRESS_TO_SELECT, TRACKPOINT_SYSCTL_SKIP_BACKUPS }; typedef struct trackpointinfo { struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; int sensitivity; int inertia; int uplateau; int reach; int draghys; int mindrag; int upthresh; int threshold; int jenks; int ztime; int pts; int skipback; } trackpointinfo_t; typedef struct finger { int x; int y; int p; int w; int flags; } finger_t; #define PSM_FINGERS 2 /* # of processed fingers */ #define PSM_FINGER_IS_PEN (1<<0) #define PSM_FINGER_FUZZY (1<<1) #define PSM_FINGER_DEFAULT_P tap_threshold #define PSM_FINGER_DEFAULT_W 1 #define PSM_FINGER_IS_SET(f) ((f).x != -1 && (f).y != -1 && (f).p != 0) #define PSM_FINGER_RESET(f) do { \ (f) = (finger_t) { .x = -1, .y = -1, .p = 0, .w = 0, .flags = 0 }; \ } while (0) typedef struct elantechhw { int hwversion; int fwversion; int sizex; int sizey; int dpmmx; int dpmmy; int ntracesx; int ntracesy; int dptracex; int dptracey; int issemimt; int isclickpad; int hascrc; int hastrackpoint; int haspressure; } elantechhw_t; /* minimum versions supported by this driver */ #define ELANTECH_HW_IS_V1(fwver) ((fwver) < 0x020030 || (fwver) == 0x020600) #define ELANTECH_MAGIC(magic) \ ((magic)[0] == 0x3c && (magic)[1] == 0x03 && \ ((magic)[2] == 0xc8 || (magic)[2] == 0x00)) #define ELANTECH_FW_ID 0x00 #define ELANTECH_FW_VERSION 0x01 #define ELANTECH_CAPABILITIES 0x02 #define ELANTECH_SAMPLE 0x03 #define ELANTECH_RESOLUTION 0x04 #define ELANTECH_REG_READ 0x10 #define ELANTECH_REG_WRITE 0x11 #define ELANTECH_REG_RDWR 0x00 #define ELANTECH_CUSTOM_CMD 0xf8 #ifdef EVDEV_SUPPORT #define ELANTECH_MAX_FINGERS 5 #else #define ELANTECH_MAX_FINGERS PSM_FINGERS #endif #define ELANTECH_FINGER_MAX_P 255 #define ELANTECH_FINGER_MAX_W 15 #define ELANTECH_FINGER_SET_XYP(pb) (finger_t) { \ .x = (((pb)->ipacket[1] & 0x0f) << 8) | (pb)->ipacket[2], \ .y = (((pb)->ipacket[4] & 0x0f) << 8) | (pb)->ipacket[5], \ .p = ((pb)->ipacket[1] & 0xf0) | (((pb)->ipacket[4] >> 4) & 0x0f), \ .w = PSM_FINGER_DEFAULT_W, \ .flags = 0 \ } enum { ELANTECH_PKT_NOP, ELANTECH_PKT_TRACKPOINT, ELANTECH_PKT_V2_COMMON, ELANTECH_PKT_V2_2FINGER, ELANTECH_PKT_V3, ELANTECH_PKT_V4_STATUS, ELANTECH_PKT_V4_HEAD, ELANTECH_PKT_V4_MOTION }; #define ELANTECH_PKT_IS_TRACKPOINT(pb) (((pb)->ipacket[3] & 0x0f) == 0x06) #define ELANTECH_PKT_IS_DEBOUNCE(pb, hwversion) ((hwversion) == 4 ? 0 : \ (pb)->ipacket[0] == ((hwversion) == 2 ? 0x84 : 0xc4) && \ (pb)->ipacket[1] == 0xff && (pb)->ipacket[2] == 0xff && \ (pb)->ipacket[3] == 0x02 && (pb)->ipacket[4] == 0xff && \ (pb)->ipacket[5] == 0xff) #define ELANTECH_PKT_IS_V2(pb) \ (((pb)->ipacket[0] & 0x0c) == 0x04 && ((pb)->ipacket[3] & 0x0f) == 0x02) #define ELANTECH_PKT_IS_V3_HEAD(pb, hascrc) ((hascrc) ? \ ((pb)->ipacket[3] & 0x09) == 0x08 : \ ((pb)->ipacket[0] & 0x0c) == 0x04 && ((pb)->ipacket[3] & 0xcf) == 0x02) #define ELANTECH_PKT_IS_V3_TAIL(pb, hascrc) ((hascrc) ? \ ((pb)->ipacket[3] & 0x09) == 0x09 : \ ((pb)->ipacket[0] & 0x0c) == 0x0c && ((pb)->ipacket[3] & 0xce) == 0x0c) #define ELANTECH_PKT_IS_V4(pb, hascrc) ((hascrc) ? 
\ ((pb)->ipacket[3] & 0x08) == 0x00 : \ ((pb)->ipacket[0] & 0x0c) == 0x04 && ((pb)->ipacket[3] & 0x1c) == 0x10) typedef struct elantechaction { finger_t fingers[ELANTECH_MAX_FINGERS]; int mask; int mask_v4wait; } elantechaction_t; /* driver control block */ struct psm_softc { /* Driver status information */ int unit; struct selinfo rsel; /* Process selecting for Input */ u_char state; /* Mouse driver state */ int config; /* driver configuration flags */ int flags; /* other flags */ KBDC kbdc; /* handle to access kbd controller */ struct resource *intr; /* IRQ resource */ void *ih; /* interrupt handle */ mousehw_t hw; /* hardware information */ synapticshw_t synhw; /* Synaptics hardware information */ synapticsinfo_t syninfo; /* Synaptics configuration */ smoother_t smoother[PSM_FINGERS]; /* Motion smoothing */ gesture_t gesture; /* Gesture context */ elantechhw_t elanhw; /* Elantech hardware information */ elantechaction_t elanaction; /* Elantech action context */ int tphw; /* TrackPoint hardware information */ trackpointinfo_t tpinfo; /* TrackPoint configuration */ mousemode_t mode; /* operation mode */ mousemode_t dflt_mode; /* default operation mode */ mousestatus_t status; /* accumulated mouse movement */ ringbuf_t queue; /* mouse status queue */ packetbuf_t pqueue[PSM_PACKETQUEUE]; /* mouse data queue */ int pqueue_start; /* start of data in queue */ int pqueue_end; /* end of data in queue */ int button; /* the latest button state */ int xold; /* previous absolute X position */ int yold; /* previous absolute Y position */ int xaverage; /* average X position */ int yaverage; /* average Y position */ int squelch; /* level to filter movement at low speed */ int syncerrors; /* # of bytes discarded to synchronize */ int pkterrors; /* # of packets failed during quarantine. */ int fpcount; /* forcePad valid packet counter */ struct timeval inputtimeout; struct timeval lastsoftintr; /* time of last soft interrupt */ struct timeval lastinputerr; /* time last sync error happened */ struct timeval idletimeout; packetbuf_t idlepacket; /* packet to send after idle timeout */ int watchdog; /* watchdog timer flag */ struct callout callout; /* watchdog timer call out */ struct callout softcallout; /* buffer timer call out */ struct cdev *dev; struct cdev *bdev; int lasterr; int cmdcount; struct sigio *async; /* Processes waiting for SIGIO */ int extended_buttons; + int muxport; /* MUX port with attached Synaptics */ + u_char muxsave[3]; /* 3->6 byte proto conversion buffer */ + int muxtpbuttons; /* Touchpad button state */ + int muxmsbuttons; /* Mouse (trackpoint) button state */ + struct timeval muxmidtimeout; /* middle button suppression timeout */ #ifdef EVDEV_SUPPORT struct evdev_dev *evdev_a; /* Absolute reporting device */ struct evdev_dev *evdev_r; /* Relative reporting device */ #endif }; static devclass_t psm_devclass; /* driver state flags (state) */ #define PSM_VALID 0x80 #define PSM_OPEN 1 /* Device is open */ #define PSM_ASLP 2 /* Waiting for mouse data */ #define PSM_SOFTARMED 4 /* Software interrupt armed */ #define PSM_NEED_SYNCBITS 8 /* Set syncbits using next data pkt */ #define PSM_EV_OPEN_R 0x10 /* Relative evdev device is open */ #define PSM_EV_OPEN_A 0x20 /* Absolute evdev device is open */ /* driver configuration flags (config) */ #define PSM_CONFIG_RESOLUTION 0x000f /* resolution */ #define PSM_CONFIG_ACCEL 0x00f0 /* acceleration factor */ #define PSM_CONFIG_NOCHECKSYNC 0x0100 /* disable sync. 
test */ #define PSM_CONFIG_NOIDPROBE 0x0200 /* disable mouse model probe */ #define PSM_CONFIG_NORESET 0x0400 /* don't reset the mouse */ #define PSM_CONFIG_FORCETAP 0x0800 /* assume `tap' action exists */ #define PSM_CONFIG_IGNPORTERROR 0x1000 /* ignore error in aux port test */ #define PSM_CONFIG_HOOKRESUME 0x2000 /* hook the system resume event */ #define PSM_CONFIG_INITAFTERSUSPEND 0x4000 /* init the device at the resume event */ #define PSM_CONFIG_FLAGS \ (PSM_CONFIG_RESOLUTION | \ PSM_CONFIG_ACCEL | \ PSM_CONFIG_NOCHECKSYNC | \ PSM_CONFIG_NOIDPROBE | \ PSM_CONFIG_NORESET | \ PSM_CONFIG_FORCETAP | \ PSM_CONFIG_IGNPORTERROR | \ PSM_CONFIG_HOOKRESUME | \ PSM_CONFIG_INITAFTERSUSPEND) /* other flags (flags) */ #define PSM_FLAGS_FINGERDOWN 0x0001 /* VersaPad finger down */ #define kbdcp(p) ((atkbdc_softc_t *)(p)) #define ALWAYS_RESTORE_CONTROLLER(kbdc) !(kbdcp(kbdc)->quirks \ & KBDC_QUIRK_KEEP_ACTIVATED) /* Tunables */ static int tap_enabled = -1; static int verbose = PSM_DEBUG; static int synaptics_support = 0; static int trackpoint_support = 0; static int elantech_support = 0; /* for backward compatibility */ #define OLD_MOUSE_GETHWINFO _IOR('M', 1, old_mousehw_t) #define OLD_MOUSE_GETMODE _IOR('M', 2, old_mousemode_t) #define OLD_MOUSE_SETMODE _IOW('M', 3, old_mousemode_t) typedef struct old_mousehw { int buttons; int iftype; int type; int hwid; } old_mousehw_t; typedef struct old_mousemode { int protocol; int rate; int resolution; int accelfactor; } old_mousemode_t; #define SYN_OFFSET(field) offsetof(struct psm_softc, syninfo.field) enum { SYNAPTICS_SYSCTL_MIN_PRESSURE = SYN_OFFSET(min_pressure), SYNAPTICS_SYSCTL_MAX_PRESSURE = SYN_OFFSET(max_pressure), SYNAPTICS_SYSCTL_MAX_WIDTH = SYN_OFFSET(max_width), SYNAPTICS_SYSCTL_MARGIN_TOP = SYN_OFFSET(margin_top), SYNAPTICS_SYSCTL_MARGIN_RIGHT = SYN_OFFSET(margin_right), SYNAPTICS_SYSCTL_MARGIN_BOTTOM = SYN_OFFSET(margin_bottom), SYNAPTICS_SYSCTL_MARGIN_LEFT = SYN_OFFSET(margin_left), SYNAPTICS_SYSCTL_NA_TOP = SYN_OFFSET(na_top), SYNAPTICS_SYSCTL_NA_RIGHT = SYN_OFFSET(na_right), SYNAPTICS_SYSCTL_NA_BOTTOM = SYN_OFFSET(na_bottom), SYNAPTICS_SYSCTL_NA_LEFT = SYN_OFFSET(na_left), SYNAPTICS_SYSCTL_WINDOW_MIN = SYN_OFFSET(window_min), SYNAPTICS_SYSCTL_WINDOW_MAX = SYN_OFFSET(window_max), SYNAPTICS_SYSCTL_MULTIPLICATOR = SYN_OFFSET(multiplicator), SYNAPTICS_SYSCTL_WEIGHT_CURRENT = SYN_OFFSET(weight_current), SYNAPTICS_SYSCTL_WEIGHT_PREVIOUS = SYN_OFFSET(weight_previous), SYNAPTICS_SYSCTL_WEIGHT_PREVIOUS_NA = SYN_OFFSET(weight_previous_na), SYNAPTICS_SYSCTL_WEIGHT_LEN_SQUARED = SYN_OFFSET(weight_len_squared), SYNAPTICS_SYSCTL_DIV_MIN = SYN_OFFSET(div_min), SYNAPTICS_SYSCTL_DIV_MAX = SYN_OFFSET(div_max), SYNAPTICS_SYSCTL_DIV_MAX_NA = SYN_OFFSET(div_max_na), SYNAPTICS_SYSCTL_DIV_LEN = SYN_OFFSET(div_len), SYNAPTICS_SYSCTL_TAP_MAX_DELTA = SYN_OFFSET(tap_max_delta), SYNAPTICS_SYSCTL_TAP_MIN_QUEUE = SYN_OFFSET(tap_min_queue), SYNAPTICS_SYSCTL_TAPHOLD_TIMEOUT = SYN_OFFSET(taphold_timeout), SYNAPTICS_SYSCTL_VSCROLL_HOR_AREA = SYN_OFFSET(vscroll_hor_area), SYNAPTICS_SYSCTL_VSCROLL_VER_AREA = SYN_OFFSET(vscroll_ver_area), SYNAPTICS_SYSCTL_VSCROLL_MIN_DELTA = SYN_OFFSET(vscroll_min_delta), SYNAPTICS_SYSCTL_VSCROLL_DIV_MIN = SYN_OFFSET(vscroll_div_min), SYNAPTICS_SYSCTL_VSCROLL_DIV_MAX = SYN_OFFSET(vscroll_div_max), SYNAPTICS_SYSCTL_TOUCHPAD_OFF = SYN_OFFSET(touchpad_off), SYNAPTICS_SYSCTL_SOFTBUTTONS_Y = SYN_OFFSET(softbuttons_y), SYNAPTICS_SYSCTL_SOFTBUTTON2_X = SYN_OFFSET(softbutton2_x), SYNAPTICS_SYSCTL_SOFTBUTTON3_X = SYN_OFFSET(softbutton3_x), }; /* 
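The SYN_OFFSET enum above works because each tunable is identified by its byte offset within struct psm_softc, so a single sysctl handler can resolve the target field from its argument. A minimal userland sketch of that offsetof dispatch; the struct, enum, and demo_get() names are illustrative, not the driver's own:

#include <stddef.h>
#include <stdio.h>

struct syninfo_demo {
	int min_pressure;
	int max_pressure;
};

enum {
	DEMO_MIN_PRESSURE = offsetof(struct syninfo_demo, min_pressure),
	DEMO_MAX_PRESSURE = offsetof(struct syninfo_demo, max_pressure)
};

/* One generic accessor: the second argument carries the field offset. */
static int
demo_get(struct syninfo_demo *sc, size_t arg2)
{
	return (*(int *)((char *)sc + arg2));
}

int
main(void)
{
	struct syninfo_demo si = { 16, 220 };

	printf("%d %d\n", demo_get(&si, DEMO_MIN_PRESSURE),
	    demo_get(&si, DEMO_MAX_PRESSURE));
	return (0);
}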
packet formatting function */ typedef int packetfunc_t(struct psm_softc *, u_char *, int *, int, mousestatus_t *); /* function prototypes */ static void psmidentify(driver_t *, device_t); static int psmprobe(device_t); static int psmattach(device_t); static int psmdetach(device_t); static int psmresume(device_t); static d_open_t psm_cdev_open; static d_close_t psm_cdev_close; static d_read_t psmread; static d_write_t psmwrite; static d_ioctl_t psmioctl; static d_poll_t psmpoll; static int psmopen(struct psm_softc *); static int psmclose(struct psm_softc *); #ifdef EVDEV_SUPPORT static evdev_open_t psm_ev_open_r; static evdev_close_t psm_ev_close_r; static evdev_open_t psm_ev_open_a; static evdev_close_t psm_ev_close_a; #endif static int enable_aux_dev(KBDC); static int disable_aux_dev(KBDC); static int get_mouse_status(KBDC, int *, int, int); static int get_aux_id(KBDC); static int set_mouse_sampling_rate(KBDC, int); static int set_mouse_scaling(KBDC, int); static int set_mouse_resolution(KBDC, int); static int set_mouse_mode(KBDC); static int get_mouse_buttons(KBDC); static int is_a_mouse(int); static void recover_from_error(KBDC); static int restore_controller(KBDC, int); static int doinitialize(struct psm_softc *, mousemode_t *); static int doopen(struct psm_softc *, int); static int reinitialize(struct psm_softc *, int); static char *model_name(int); static void psmsoftintr(void *); static void psmsoftintridle(void *); static void psmintr(void *); static void psmtimeout(void *); static int timeelapsed(const struct timeval *, int, int, const struct timeval *); static void dropqueue(struct psm_softc *); static void flushpackets(struct psm_softc *); static void proc_mmanplus(struct psm_softc *, packetbuf_t *, mousestatus_t *, int *, int *, int *); static int proc_synaptics(struct psm_softc *, packetbuf_t *, mousestatus_t *, int *, int *, int *); +static int proc_synaptics_mux(struct psm_softc *, packetbuf_t *); static void proc_versapad(struct psm_softc *, packetbuf_t *, mousestatus_t *, int *, int *, int *); static int proc_elantech(struct psm_softc *, packetbuf_t *, mousestatus_t *, int *, int *, int *); static int psmpalmdetect(struct psm_softc *, finger_t *, int); static void psmgestures(struct psm_softc *, finger_t *, int, mousestatus_t *); static void psmsmoother(struct psm_softc *, finger_t *, int, mousestatus_t *, int *, int *); static int tame_mouse(struct psm_softc *, packetbuf_t *, mousestatus_t *, u_char *); /* vendor specific features */ enum probearg { PROBE, REINIT }; typedef int probefunc_t(struct psm_softc *, enum probearg); static int mouse_id_proc1(KBDC, int, int, int *); static int mouse_ext_command(KBDC, int); static probefunc_t enable_groller; static probefunc_t enable_gmouse; static probefunc_t enable_aglide; static probefunc_t enable_kmouse; static probefunc_t enable_msexplorer; static probefunc_t enable_msintelli; static probefunc_t enable_4dmouse; static probefunc_t enable_4dplus; static probefunc_t enable_mmanplus; static probefunc_t enable_synaptics; +static probefunc_t enable_synaptics_mux; static probefunc_t enable_trackpoint; static probefunc_t enable_versapad; static probefunc_t enable_elantech; static void set_trackpoint_parameters(struct psm_softc *sc); static void synaptics_passthrough_on(struct psm_softc *sc); static void synaptics_passthrough_off(struct psm_softc *sc); static int synaptics_preferred_mode(struct psm_softc *sc); static void synaptics_set_mode(struct psm_softc *sc, int mode_byte); static struct { int model; u_char syncmask; int 
packetsize; probefunc_t *probefunc; } vendortype[] = { /* * WARNING: the order of probe is very important. Don't mess it * unless you know what you are doing. */ + { MOUSE_MODEL_SYNAPTICS, /* Synaptics Touchpad on Active Mux */ + 0x00, MOUSE_PS2_PACKETSIZE, enable_synaptics_mux }, { MOUSE_MODEL_NET, /* Genius NetMouse */ 0x08, MOUSE_PS2INTELLI_PACKETSIZE, enable_gmouse }, { MOUSE_MODEL_NETSCROLL, /* Genius NetScroll */ 0xc8, 6, enable_groller }, { MOUSE_MODEL_MOUSEMANPLUS, /* Logitech MouseMan+ */ 0x08, MOUSE_PS2_PACKETSIZE, enable_mmanplus }, { MOUSE_MODEL_EXPLORER, /* Microsoft IntelliMouse Explorer */ 0x08, MOUSE_PS2INTELLI_PACKETSIZE, enable_msexplorer }, { MOUSE_MODEL_4D, /* A4 Tech 4D Mouse */ 0x08, MOUSE_4D_PACKETSIZE, enable_4dmouse }, { MOUSE_MODEL_4DPLUS, /* A4 Tech 4D+ Mouse */ 0xc8, MOUSE_4DPLUS_PACKETSIZE, enable_4dplus }, { MOUSE_MODEL_SYNAPTICS, /* Synaptics Touchpad */ 0xc0, MOUSE_SYNAPTICS_PACKETSIZE, enable_synaptics }, { MOUSE_MODEL_ELANTECH, /* Elantech Touchpad */ 0x04, MOUSE_ELANTECH_PACKETSIZE, enable_elantech }, { MOUSE_MODEL_INTELLI, /* Microsoft IntelliMouse */ 0x08, MOUSE_PS2INTELLI_PACKETSIZE, enable_msintelli }, { MOUSE_MODEL_GLIDEPOINT, /* ALPS GlidePoint */ 0xc0, MOUSE_PS2_PACKETSIZE, enable_aglide }, { MOUSE_MODEL_THINK, /* Kensington ThinkingMouse */ 0x80, MOUSE_PS2_PACKETSIZE, enable_kmouse }, { MOUSE_MODEL_VERSAPAD, /* Interlink electronics VersaPad */ 0xe8, MOUSE_PS2VERSA_PACKETSIZE, enable_versapad }, { MOUSE_MODEL_TRACKPOINT, /* IBM/Lenovo TrackPoint */ 0xc0, MOUSE_PS2_PACKETSIZE, enable_trackpoint }, { MOUSE_MODEL_GENERIC, 0xc0, MOUSE_PS2_PACKETSIZE, NULL }, }; #define GENERIC_MOUSE_ENTRY (nitems(vendortype) - 1) /* device driver declarateion */ static device_method_t psm_methods[] = { /* Device interface */ DEVMETHOD(device_identify, psmidentify), DEVMETHOD(device_probe, psmprobe), DEVMETHOD(device_attach, psmattach), DEVMETHOD(device_detach, psmdetach), DEVMETHOD(device_resume, psmresume), { 0, 0 } }; static driver_t psm_driver = { PSM_DRIVER_NAME, psm_methods, sizeof(struct psm_softc), }; static struct cdevsw psm_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = psm_cdev_open, .d_close = psm_cdev_close, .d_read = psmread, .d_write = psmwrite, .d_ioctl = psmioctl, .d_poll = psmpoll, .d_name = PSM_DRIVER_NAME, }; #ifdef EVDEV_SUPPORT static const struct evdev_methods psm_ev_methods_r = { .ev_open = psm_ev_open_r, .ev_close = psm_ev_close_r, }; static const struct evdev_methods psm_ev_methods_a = { .ev_open = psm_ev_open_a, .ev_close = psm_ev_close_a, }; #endif /* device I/O routines */ static int enable_aux_dev(KBDC kbdc) { int res; res = send_aux_command(kbdc, PSMC_ENABLE_DEV); VLOG(2, (LOG_DEBUG, "psm: ENABLE_DEV return code:%04x\n", res)); return (res == PSM_ACK); } static int disable_aux_dev(KBDC kbdc) { int res; res = send_aux_command(kbdc, PSMC_DISABLE_DEV); VLOG(2, (LOG_DEBUG, "psm: DISABLE_DEV return code:%04x\n", res)); return (res == PSM_ACK); } static int get_mouse_status(KBDC kbdc, int *status, int flag, int len) { int cmd; int res; int i; switch (flag) { case 0: default: cmd = PSMC_SEND_DEV_STATUS; break; case 1: cmd = PSMC_SEND_DEV_DATA; break; } empty_aux_buffer(kbdc, 5); res = send_aux_command(kbdc, cmd); VLOG(2, (LOG_DEBUG, "psm: SEND_AUX_DEV_%s return code:%04x\n", (flag == 1) ? 
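The vendortype[] table above is scanned top to bottom at probe time and the first probefunc that succeeds decides the model, which is why the ordering warning matters and why the new Active Mux entry sits first: it must claim the device before the plain Synaptics probe does. A reduced sketch of the first-match scan, with stand-in probe functions (all names here are hypothetical):

#include <stdio.h>

typedef int probefunc_demo_t(void);

static int probe_mux(void)      { return (0); }	/* not present */
static int probe_synaptics(void){ return (1); }	/* found */
static int probe_generic(void)  { return (1); }	/* always matches */

static struct {
	const char *model;
	probefunc_demo_t *probefunc;
} demotab[] = {
	{ "mux",       probe_mux },	/* must run before synaptics */
	{ "synaptics", probe_synaptics },
	{ "generic",   probe_generic },	/* catch-all, keep last */
};

int
main(void)
{
	size_t i;

	for (i = 0; i < sizeof(demotab) / sizeof(demotab[0]); i++)
		if (demotab[i].probefunc()) {
			printf("model: %s\n", demotab[i].model);
			break;
		}
	return (0);
}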
"DATA" : "STATUS", res)); if (res != PSM_ACK) return (0); for (i = 0; i < len; ++i) { status[i] = read_aux_data(kbdc); if (status[i] < 0) break; } if (len >= 3) { for (; i < 3; ++i) status[i] = 0; VLOG(1, (LOG_DEBUG, "psm: %s %02x %02x %02x\n", (flag == 1) ? "data" : "status", status[0], status[1], status[2])); } return (i); } static int get_aux_id(KBDC kbdc) { int res; int id; empty_aux_buffer(kbdc, 5); res = send_aux_command(kbdc, PSMC_SEND_DEV_ID); VLOG(2, (LOG_DEBUG, "psm: SEND_DEV_ID return code:%04x\n", res)); if (res != PSM_ACK) return (-1); /* 10ms delay */ DELAY(10000); id = read_aux_data(kbdc); VLOG(2, (LOG_DEBUG, "psm: device ID: %04x\n", id)); return (id); } static int set_mouse_sampling_rate(KBDC kbdc, int rate) { int res; res = send_aux_command_and_data(kbdc, PSMC_SET_SAMPLING_RATE, rate); VLOG(2, (LOG_DEBUG, "psm: SET_SAMPLING_RATE (%d) %04x\n", rate, res)); return ((res == PSM_ACK) ? rate : -1); } static int set_mouse_scaling(KBDC kbdc, int scale) { int res; switch (scale) { case 1: default: scale = PSMC_SET_SCALING11; break; case 2: scale = PSMC_SET_SCALING21; break; } res = send_aux_command(kbdc, scale); VLOG(2, (LOG_DEBUG, "psm: SET_SCALING%s return code:%04x\n", (scale == PSMC_SET_SCALING21) ? "21" : "11", res)); return (res == PSM_ACK); } /* `val' must be 0 through PSMD_MAX_RESOLUTION */ static int set_mouse_resolution(KBDC kbdc, int val) { int res; res = send_aux_command_and_data(kbdc, PSMC_SET_RESOLUTION, val); VLOG(2, (LOG_DEBUG, "psm: SET_RESOLUTION (%d) %04x\n", val, res)); return ((res == PSM_ACK) ? val : -1); } /* * NOTE: once `set_mouse_mode()' is called, the mouse device must be * re-enabled by calling `enable_aux_dev()' */ static int set_mouse_mode(KBDC kbdc) { int res; res = send_aux_command(kbdc, PSMC_SET_STREAM_MODE); VLOG(2, (LOG_DEBUG, "psm: SET_STREAM_MODE return code:%04x\n", res)); return (res == PSM_ACK); } static int get_mouse_buttons(KBDC kbdc) { int c = 2; /* assume two buttons by default */ int status[3]; /* * NOTE: a special sequence to obtain Logitech Mouse specific * information: set resolution to 25 ppi, set scaling to 1:1, set * scaling to 1:1, set scaling to 1:1. Then the second byte of the * mouse status bytes is the number of available buttons. * Some manufactures also support this sequence. */ if (set_mouse_resolution(kbdc, PSMD_RES_LOW) != PSMD_RES_LOW) return (c); if (set_mouse_scaling(kbdc, 1) && set_mouse_scaling(kbdc, 1) && set_mouse_scaling(kbdc, 1) && get_mouse_status(kbdc, status, 0, 3) >= 3 && status[1] != 0) return (status[1]); return (c); } /* misc subroutines */ /* * Someday, I will get the complete list of valid pointing devices and * their IDs... 
XXX */ static int is_a_mouse(int id) { #if 0 static int valid_ids[] = { PSM_MOUSE_ID, /* mouse */ PSM_BALLPOINT_ID, /* ballpoint device */ PSM_INTELLI_ID, /* Intellimouse */ PSM_EXPLORER_ID, /* Intellimouse Explorer */ -1 /* end of table */ }; int i; for (i = 0; valid_ids[i] >= 0; ++i) if (valid_ids[i] == id) return (TRUE); return (FALSE); #else return (TRUE); #endif } static char * model_name(int model) { static struct { int model_code; char *model_name; } models[] = { { MOUSE_MODEL_NETSCROLL, "NetScroll" }, { MOUSE_MODEL_NET, "NetMouse/NetScroll Optical" }, { MOUSE_MODEL_GLIDEPOINT, "GlidePoint" }, { MOUSE_MODEL_THINK, "ThinkingMouse" }, { MOUSE_MODEL_INTELLI, "IntelliMouse" }, { MOUSE_MODEL_MOUSEMANPLUS, "MouseMan+" }, { MOUSE_MODEL_VERSAPAD, "VersaPad" }, { MOUSE_MODEL_EXPLORER, "IntelliMouse Explorer" }, { MOUSE_MODEL_4D, "4D Mouse" }, { MOUSE_MODEL_4DPLUS, "4D+ Mouse" }, { MOUSE_MODEL_SYNAPTICS, "Synaptics Touchpad" }, { MOUSE_MODEL_TRACKPOINT, "IBM/Lenovo TrackPoint" }, { MOUSE_MODEL_ELANTECH, "Elantech Touchpad" }, { MOUSE_MODEL_GENERIC, "Generic PS/2 mouse" }, { MOUSE_MODEL_UNKNOWN, "Unknown" }, }; int i; for (i = 0; models[i].model_code != MOUSE_MODEL_UNKNOWN; ++i) if (models[i].model_code == model) break; return (models[i].model_name); } static void recover_from_error(KBDC kbdc) { /* discard anything left in the output buffer */ empty_both_buffers(kbdc, 10); #if 0 /* * NOTE: KBDC_RESET_KBD may not restore the communication between the * keyboard and the controller. */ reset_kbd(kbdc); #else /* * NOTE: somehow diagnostic and keyboard port test commands bring the * keyboard back. */ if (!test_controller(kbdc)) log(LOG_ERR, "psm: keyboard controller failed.\n"); /* if there isn't a keyboard in the system, the following error is OK */ if (test_kbd_port(kbdc) != 0) VLOG(1, (LOG_ERR, "psm: keyboard port failed.\n")); #endif } static int restore_controller(KBDC kbdc, int command_byte) { empty_both_buffers(kbdc, 10); if (!set_controller_command_byte(kbdc, 0xff, command_byte)) { log(LOG_ERR, "psm: failed to restore the keyboard controller " "command byte.\n"); empty_both_buffers(kbdc, 10); return (FALSE); } else { empty_both_buffers(kbdc, 10); return (TRUE); } } /* * Re-initialize the aux port and device. The aux port must be enabled * and its interrupt must be disabled before calling this routine. * The aux device will be disabled before returning. * The keyboard controller must be locked via `kbdc_lock()' before * calling this routine. */ static int doinitialize(struct psm_softc *sc, mousemode_t *mode) { KBDC kbdc = sc->kbdc; int stat[3]; int i; switch((i = test_aux_port(kbdc))) { case 1: /* ignore these errors */ case 2: case 3: case PSM_ACK: if (verbose) log(LOG_DEBUG, "psm%d: strange result for test aux port (%d).\n", sc->unit, i); /* FALLTHROUGH */ case 0: /* no error */ break; case -1: /* time out */ default: /* error */ recover_from_error(kbdc); if (sc->config & PSM_CONFIG_IGNPORTERROR) break; log(LOG_ERR, "psm%d: the aux port is not functioning (%d).\n", sc->unit, i); return (FALSE); } if (sc->config & PSM_CONFIG_NORESET) { /* * Don't try to reset the pointing device. It may possibly * be left in the unknown state, though... */ } else { /* * NOTE: some controllers appears to hang the `keyboard' when * the aux port doesn't exist and `PSMC_RESET_DEV' is issued. 
	 */
		if (!reset_aux_dev(kbdc)) {
			recover_from_error(kbdc);
			log(LOG_ERR,
			    "psm%d: failed to reset the aux device.\n",
			    sc->unit);
			return (FALSE);
		}
	}

	/*
	 * Both the aux port and the aux device are functioning; see
	 * if the device can be enabled.
	 */
	if (!enable_aux_dev(kbdc) || !disable_aux_dev(kbdc)) {
		log(LOG_ERR, "psm%d: failed to enable the aux device.\n",
		    sc->unit);
		return (FALSE);
	}
	empty_both_buffers(kbdc, 10);	/* remove stray data if any */

	/* Re-enable the mouse. */
	for (i = 0; vendortype[i].probefunc != NULL; ++i)
		if (vendortype[i].model == sc->hw.model)
			(*vendortype[i].probefunc)(sc, REINIT);

	/* set mouse parameters */
	if (mode != (mousemode_t *)NULL) {
		if (mode->rate > 0)
			mode->rate = set_mouse_sampling_rate(kbdc, mode->rate);
		if (mode->resolution >= 0)
			mode->resolution = set_mouse_resolution(kbdc,
			    mode->resolution);
		set_mouse_scaling(kbdc, 1);
		set_mouse_mode(kbdc);
	}

	/* Record sync on the next data packet we see. */
	sc->flags |= PSM_NEED_SYNCBITS;

	/* just check the status of the mouse */
	if (get_mouse_status(kbdc, stat, 0, 3) < 3)
		log(LOG_DEBUG, "psm%d: failed to get status (doinitialize).\n",
		    sc->unit);

	return (TRUE);
}

static int
doopen(struct psm_softc *sc, int command_byte)
{
	int stat[3];
+	int mux_enabled = FALSE;

	/*
	 * FIXME: Synaptics TouchPad seems to go back to Relative Mode with
	 * no obvious reason. Thus we check the current mode and restore the
	 * Absolute Mode if it was cleared.
	 *
	 * The previous hack at the end of psmprobe() wasn't efficient when
	 * moused(8) was restarted.
	 *
	 * A Reset (FF) or Set Defaults (F6) command would clear the
	 * Absolute Mode bit. But a verbose boot or debug.psm.loglevel=5
	 * doesn't show any evidence of such a command.
	 */
	if (sc->hw.model == MOUSE_MODEL_SYNAPTICS) {
+		if (sc->muxport != PSM_NOMUX) {
+			mux_enabled = enable_aux_mux(sc->kbdc) >= 0;
+			if (mux_enabled)
+				set_active_aux_mux_port(sc->kbdc, sc->muxport);
+			else
+				log(LOG_ERR, "psm%d: failed to enable "
+				    "active multiplexing mode.\n",
+				    sc->unit);
+		}
		mouse_ext_command(sc->kbdc, 1);
		get_mouse_status(sc->kbdc, stat, 0, 3);
		if ((SYNAPTICS_VERSION_GE(sc->synhw, 7, 5) ||
-		    stat[1] == 0x46 || stat[1] == 0x47) &&
+		    stat[1] == 0x47) &&
		    stat[2] == 0x40) {
			synaptics_set_mode(sc, synaptics_preferred_mode(sc));
			VLOG(5, (LOG_DEBUG, "psm%d: Synaptics Absolute Mode "
			    "hopefully restored\n", sc->unit));
		}
+		if (mux_enabled)
+			disable_aux_mux(sc->kbdc);
	}

	/*
	 * A user may want to disable tap and drag gestures on a Synaptics
	 * TouchPad when it operates in Relative Mode.
	 */
	if (sc->hw.model == MOUSE_MODEL_GENERIC) {
		if (tap_enabled > 0) {
			VLOG(2, (LOG_DEBUG,
			    "psm%d: enable tap and drag gestures\n",
			    sc->unit));
			synaptics_set_mode(sc, synaptics_preferred_mode(sc));
		} else if (tap_enabled == 0) {
			VLOG(2, (LOG_DEBUG,
			    "psm%d: disable tap and drag gestures\n",
			    sc->unit));
			synaptics_set_mode(sc, synaptics_preferred_mode(sc));
		}
	}

	/* enable the mouse device */
	if (!enable_aux_dev(sc->kbdc)) {
		/* MOUSE ERROR: failed to enable the mouse because:
		 * 1) the mouse is faulty,
		 * 2) the mouse has been removed(!?)
		 * In the latter case, the keyboard may have hung, and may
		 * need a recovery procedure...
		 */
		recover_from_error(sc->kbdc);
#if 0
		/* FIXME: we could reset the mouse here and try to enable
		 * it again. But it will take a long time and it's not a good
		 * idea to disable the keyboard that long...
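Condensing the doopen() hunk above: when the pad sits behind an active multiplexor, pad-specific commands must be bracketed by enabling the mux and selecting the pad's port, after which the mux is switched off again so single-port behaviour resumes. A sketch of that bracket, with error handling trimmed (the helper names are the ones this change relies on):

/* Bracketing pattern for talking to a Synaptics pad behind a MUX. */
static void
mux_touch_pad_demo(struct psm_softc *sc)
{
	int mux_enabled;

	mux_enabled = enable_aux_mux(sc->kbdc) >= 0;
	if (mux_enabled)
		set_active_aux_mux_port(sc->kbdc, sc->muxport);

	/* ... Synaptics-specific commands go here ... */

	if (mux_enabled)
		disable_aux_mux(sc->kbdc);
}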
*/ if (!doinitialize(sc, &sc->mode) || !enable_aux_dev(sc->kbdc)) { recover_from_error(sc->kbdc); #else { #endif restore_controller(sc->kbdc, command_byte); /* mark this device is no longer available */ sc->state &= ~PSM_VALID; log(LOG_ERR, "psm%d: failed to enable the device (doopen).\n", sc->unit); return (EIO); } } if (get_mouse_status(sc->kbdc, stat, 0, 3) < 3) log(LOG_DEBUG, "psm%d: failed to get status (doopen).\n", sc->unit); /* enable the aux port and interrupt */ if (!set_controller_command_byte(sc->kbdc, kbdc_get_device_mask(sc->kbdc), (command_byte & KBD_KBD_CONTROL_BITS) | KBD_ENABLE_AUX_PORT | KBD_ENABLE_AUX_INT)) { /* CONTROLLER ERROR */ disable_aux_dev(sc->kbdc); restore_controller(sc->kbdc, command_byte); log(LOG_ERR, "psm%d: failed to enable the aux interrupt (doopen).\n", sc->unit); return (EIO); } /* start the watchdog timer */ sc->watchdog = FALSE; callout_reset(&sc->callout, hz * 2, psmtimeout, sc); return (0); } static int reinitialize(struct psm_softc *sc, int doinit) { int err; int c; int s; /* don't let anybody mess with the aux device */ if (!kbdc_lock(sc->kbdc, TRUE)) return (EIO); s = spltty(); /* block our watchdog timer */ sc->watchdog = FALSE; callout_stop(&sc->callout); /* save the current controller command byte */ empty_both_buffers(sc->kbdc, 10); c = get_controller_command_byte(sc->kbdc); VLOG(2, (LOG_DEBUG, "psm%d: current command byte: %04x (reinitialize).\n", sc->unit, c)); /* enable the aux port but disable the aux interrupt and the keyboard */ if ((c == -1) || !set_controller_command_byte(sc->kbdc, kbdc_get_device_mask(sc->kbdc), KBD_DISABLE_KBD_PORT | KBD_DISABLE_KBD_INT | KBD_ENABLE_AUX_PORT | KBD_DISABLE_AUX_INT)) { /* CONTROLLER ERROR */ splx(s); kbdc_lock(sc->kbdc, FALSE); log(LOG_ERR, "psm%d: unable to set the command byte (reinitialize).\n", sc->unit); return (EIO); } /* flush any data */ if (sc->state & PSM_VALID) { /* this may fail; but never mind... */ disable_aux_dev(sc->kbdc); empty_aux_buffer(sc->kbdc, 10); } flushpackets(sc); sc->syncerrors = 0; sc->pkterrors = 0; memset(&sc->lastinputerr, 0, sizeof(sc->lastinputerr)); /* try to detect the aux device; are you still there? */ err = 0; if (doinit) { if (doinitialize(sc, &sc->mode)) { /* yes */ sc->state |= PSM_VALID; } else { /* the device has gone! */ restore_controller(sc->kbdc, c); sc->state &= ~PSM_VALID; log(LOG_ERR, "psm%d: the aux device has gone! 
(reinitialize).\n", sc->unit); err = ENXIO; } } splx(s); /* restore the driver state */ if ((sc->state & (PSM_OPEN | PSM_EV_OPEN_R | PSM_EV_OPEN_A)) && (err == 0)) { /* enable the aux device and the port again */ err = doopen(sc, c); if (err != 0) log(LOG_ERR, "psm%d: failed to enable the device " "(reinitialize).\n", sc->unit); } else { /* restore the keyboard port and disable the aux port */ if (!set_controller_command_byte(sc->kbdc, kbdc_get_device_mask(sc->kbdc), (c & KBD_KBD_CONTROL_BITS) | KBD_DISABLE_AUX_PORT | KBD_DISABLE_AUX_INT)) { /* CONTROLLER ERROR */ log(LOG_ERR, "psm%d: failed to disable the aux port " "(reinitialize).\n", sc->unit); err = EIO; } } kbdc_lock(sc->kbdc, FALSE); return (err); } /* psm driver entry points */ static void psmidentify(driver_t *driver, device_t parent) { device_t psmc; device_t psm; u_long irq; int unit; unit = device_get_unit(parent); /* always add at least one child */ psm = BUS_ADD_CHILD(parent, KBDC_RID_AUX, driver->name, unit); if (psm == NULL) return; irq = bus_get_resource_start(psm, SYS_RES_IRQ, KBDC_RID_AUX); if (irq > 0) return; /* * If the PS/2 mouse device has already been reported by ACPI or * PnP BIOS, obtain the IRQ resource from it. * (See psmcpnp_attach() below.) */ psmc = device_find_child(device_get_parent(parent), PSMCPNP_DRIVER_NAME, unit); if (psmc == NULL) return; irq = bus_get_resource_start(psmc, SYS_RES_IRQ, 0); if (irq <= 0) return; bus_delete_resource(psmc, SYS_RES_IRQ, 0); bus_set_resource(psm, SYS_RES_IRQ, KBDC_RID_AUX, irq, 1); } #define endprobe(v) do { \ if (bootverbose) \ --verbose; \ kbdc_set_device_mask(sc->kbdc, mask); \ kbdc_lock(sc->kbdc, FALSE); \ return (v); \ } while (0) static int psmprobe(device_t dev) { int unit = device_get_unit(dev); struct psm_softc *sc = device_get_softc(dev); int stat[3]; int command_byte; int mask; int rid; int i; #if 0 kbdc_debug(TRUE); #endif /* see if IRQ is available */ rid = KBDC_RID_AUX; sc->intr = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->intr == NULL) { if (bootverbose) device_printf(dev, "unable to allocate IRQ\n"); return (ENXIO); } bus_release_resource(dev, SYS_RES_IRQ, rid, sc->intr); sc->unit = unit; sc->kbdc = atkbdc_open(device_get_unit(device_get_parent(dev))); sc->config = device_get_flags(dev) & PSM_CONFIG_FLAGS; /* XXX: for backward compatibility */ #if defined(PSM_HOOKRESUME) || defined(PSM_HOOKAPM) sc->config |= #ifdef PSM_RESETAFTERSUSPEND PSM_CONFIG_INITAFTERSUSPEND; #else PSM_CONFIG_HOOKRESUME; #endif #endif /* PSM_HOOKRESUME | PSM_HOOKAPM */ sc->flags = 0; + sc->muxport = PSM_NOMUX; if (bootverbose) ++verbose; device_set_desc(dev, "PS/2 Mouse"); if (!kbdc_lock(sc->kbdc, TRUE)) { printf("psm%d: unable to lock the controller.\n", unit); if (bootverbose) --verbose; return (ENXIO); } /* * NOTE: two bits in the command byte controls the operation of the * aux port (mouse port): the aux port disable bit (bit 5) and the aux * port interrupt (IRQ 12) enable bit (bit 2). 
*/ /* discard anything left after the keyboard initialization */ empty_both_buffers(sc->kbdc, 10); /* save the current command byte; it will be used later */ mask = kbdc_get_device_mask(sc->kbdc) & ~KBD_AUX_CONTROL_BITS; command_byte = get_controller_command_byte(sc->kbdc); if (verbose) printf("psm%d: current command byte:%04x\n", unit, command_byte); if (command_byte == -1) { /* CONTROLLER ERROR */ printf("psm%d: unable to get the current command byte value.\n", unit); endprobe(ENXIO); } /* * disable the keyboard port while probing the aux port, which must be * enabled during this routine */ if (!set_controller_command_byte(sc->kbdc, KBD_KBD_CONTROL_BITS | KBD_AUX_CONTROL_BITS, KBD_DISABLE_KBD_PORT | KBD_DISABLE_KBD_INT | KBD_ENABLE_AUX_PORT | KBD_DISABLE_AUX_INT)) { /* * this is CONTROLLER ERROR; I don't know how to recover * from this error... */ if (ALWAYS_RESTORE_CONTROLLER(sc->kbdc)) restore_controller(sc->kbdc, command_byte); printf("psm%d: unable to set the command byte.\n", unit); endprobe(ENXIO); } write_controller_command(sc->kbdc, KBDC_ENABLE_AUX_PORT); /* * NOTE: `test_aux_port()' is designed to return with zero if the aux * port exists and is functioning. However, some controllers appears * to respond with zero even when the aux port doesn't exist. (It may * be that this is only the case when the controller DOES have the aux * port but the port is not wired on the motherboard.) The keyboard * controllers without the port, such as the original AT, are * supposed to return with an error code or simply time out. In any * case, we have to continue probing the port even when the controller * passes this test. * * XXX: some controllers erroneously return the error code 1, 2 or 3 * when it has a perfectly functional aux port. We have to ignore * this error code. Even if the controller HAS error with the aux * port, it will be detected later... * XXX: another incompatible controller returns PSM_ACK (0xfa)... */ switch ((i = test_aux_port(sc->kbdc))) { case 1: /* ignore these errors */ case 2: case 3: case PSM_ACK: if (verbose) printf("psm%d: strange result for test aux port " "(%d).\n", unit, i); /* FALLTHROUGH */ case 0: /* no error */ break; case -1: /* time out */ default: /* error */ recover_from_error(sc->kbdc); if (sc->config & PSM_CONFIG_IGNPORTERROR) break; if (ALWAYS_RESTORE_CONTROLLER(sc->kbdc)) restore_controller(sc->kbdc, command_byte); if (verbose) printf("psm%d: the aux port is not functioning (%d).\n", unit, i); endprobe(ENXIO); } if (sc->config & PSM_CONFIG_NORESET) { /* * Don't try to reset the pointing device. It may possibly be * left in an unknown state, though... */ } else { /* * NOTE: some controllers appears to hang the `keyboard' when * the aux port doesn't exist and `PSMC_RESET_DEV' is issued. * * Attempt to reset the controller twice -- this helps * pierce through some KVM switches. The second reset * is non-fatal. */ if (!reset_aux_dev(sc->kbdc)) { recover_from_error(sc->kbdc); if (ALWAYS_RESTORE_CONTROLLER(sc->kbdc)) restore_controller(sc->kbdc, command_byte); if (verbose) printf("psm%d: failed to reset the aux " "device.\n", unit); endprobe(ENXIO); } else if (!reset_aux_dev(sc->kbdc)) { recover_from_error(sc->kbdc); if (verbose >= 2) printf("psm%d: failed to reset the aux device " "(2).\n", unit); } } /* * both the aux port and the aux device are functioning, see if the * device can be enabled. NOTE: when enabled, the device will start * sending data; we shall immediately disable the device once we know * the device can be enabled. 
*/ if (!enable_aux_dev(sc->kbdc) || !disable_aux_dev(sc->kbdc)) { /* MOUSE ERROR */ recover_from_error(sc->kbdc); if (ALWAYS_RESTORE_CONTROLLER(sc->kbdc)) restore_controller(sc->kbdc, command_byte); if (verbose) printf("psm%d: failed to enable the aux device.\n", unit); endprobe(ENXIO); } /* save the default values after reset */ if (get_mouse_status(sc->kbdc, stat, 0, 3) >= 3) { sc->dflt_mode.rate = sc->mode.rate = stat[2]; sc->dflt_mode.resolution = sc->mode.resolution = stat[1]; } else { sc->dflt_mode.rate = sc->mode.rate = -1; sc->dflt_mode.resolution = sc->mode.resolution = -1; } /* hardware information */ sc->hw.iftype = MOUSE_IF_PS2; /* verify the device is a mouse */ sc->hw.hwid = get_aux_id(sc->kbdc); if (!is_a_mouse(sc->hw.hwid)) { if (ALWAYS_RESTORE_CONTROLLER(sc->kbdc)) restore_controller(sc->kbdc, command_byte); if (verbose) printf("psm%d: unknown device type (%d).\n", unit, sc->hw.hwid); endprobe(ENXIO); } switch (sc->hw.hwid) { case PSM_BALLPOINT_ID: sc->hw.type = MOUSE_TRACKBALL; break; case PSM_MOUSE_ID: case PSM_INTELLI_ID: case PSM_EXPLORER_ID: case PSM_4DMOUSE_ID: case PSM_4DPLUS_ID: sc->hw.type = MOUSE_MOUSE; break; default: sc->hw.type = MOUSE_UNKNOWN; break; } if (sc->config & PSM_CONFIG_NOIDPROBE) { sc->hw.buttons = 2; i = GENERIC_MOUSE_ENTRY; } else { /* # of buttons */ sc->hw.buttons = get_mouse_buttons(sc->kbdc); /* other parameters */ for (i = 0; vendortype[i].probefunc != NULL; ++i) if ((*vendortype[i].probefunc)(sc, PROBE)) { if (verbose >= 2) printf("psm%d: found %s\n", unit, model_name(vendortype[i].model)); break; } } sc->hw.model = vendortype[i].model; sc->dflt_mode.level = PSM_LEVEL_BASE; sc->dflt_mode.packetsize = MOUSE_PS2_PACKETSIZE; sc->dflt_mode.accelfactor = (sc->config & PSM_CONFIG_ACCEL) >> 4; if (sc->config & PSM_CONFIG_NOCHECKSYNC) sc->dflt_mode.syncmask[0] = 0; else sc->dflt_mode.syncmask[0] = vendortype[i].syncmask; if (sc->config & PSM_CONFIG_FORCETAP) sc->dflt_mode.syncmask[0] &= ~MOUSE_PS2_TAP; sc->dflt_mode.syncmask[1] = 0; /* syncbits */ sc->mode = sc->dflt_mode; sc->mode.packetsize = vendortype[i].packetsize; /* set mouse parameters */ #if 0 /* * A version of Logitech FirstMouse+ won't report wheel movement, * if SET_DEFAULTS is sent... Don't use this command. * This fix was found by Takashi Nishida. */ i = send_aux_command(sc->kbdc, PSMC_SET_DEFAULTS); if (verbose >= 2) printf("psm%d: SET_DEFAULTS return code:%04x\n", unit, i); #endif if (sc->config & PSM_CONFIG_RESOLUTION) sc->mode.resolution = set_mouse_resolution(sc->kbdc, (sc->config & PSM_CONFIG_RESOLUTION) - 1); else if (sc->mode.resolution >= 0) sc->mode.resolution = set_mouse_resolution(sc->kbdc, sc->dflt_mode.resolution); if (sc->mode.rate > 0) sc->mode.rate = set_mouse_sampling_rate(sc->kbdc, sc->dflt_mode.rate); set_mouse_scaling(sc->kbdc, 1); /* Record sync on the next data packet we see. */ sc->flags |= PSM_NEED_SYNCBITS; /* just check the status of the mouse */ /* * NOTE: XXX there are some arcane controller/mouse combinations out * there, which hung the controller unless there is data transmission * after ACK from the mouse. */ if (get_mouse_status(sc->kbdc, stat, 0, 3) < 3) printf("psm%d: failed to get status.\n", unit); else { /* * When in its native mode, some mice operate with different * default parameters than in the PS/2 compatible mode. */ sc->dflt_mode.rate = sc->mode.rate = stat[2]; sc->dflt_mode.resolution = sc->mode.resolution = stat[1]; } /* disable the aux port for now... 
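The syncmask chosen from vendortype[] above is what the interrupt path (psmintr(), outside this hunk) uses to verify packet alignment: the first byte of every packet is masked and compared against the bits captured when PSM_NEED_SYNCBITS was serviced. A sketch with illustrative values (0xc0 is the Synaptics-style mask from the table; the sample bytes are made up):

#include <stdio.h>

int
main(void)
{
	unsigned char syncmask = 0xc0;	/* from the vendortype[] entry */
	unsigned char syncbits = 0x80;	/* captured from the first packet */
	unsigned char first_bytes[] = { 0x84, 0xc3, 0x9f };
	size_t i;

	for (i = 0; i < sizeof(first_bytes); i++)
		printf("0x%02x: %s\n", first_bytes[i],
		    (first_bytes[i] & syncmask) == syncbits ?
		    "in sync" : "sync error");
	return (0);
}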
*/ if (!set_controller_command_byte(sc->kbdc, KBD_KBD_CONTROL_BITS | KBD_AUX_CONTROL_BITS, (command_byte & KBD_KBD_CONTROL_BITS) | KBD_DISABLE_AUX_PORT | KBD_DISABLE_AUX_INT)) { /* * this is CONTROLLER ERROR; I don't know the proper way to * recover from this error... */ if (ALWAYS_RESTORE_CONTROLLER(sc->kbdc)) restore_controller(sc->kbdc, command_byte); printf("psm%d: unable to set the command byte.\n", unit); endprobe(ENXIO); } /* done */ kbdc_set_device_mask(sc->kbdc, mask | KBD_AUX_CONTROL_BITS); kbdc_lock(sc->kbdc, FALSE); return (0); } #ifdef EVDEV_SUPPORT /* Values are taken from Linux drivers for userland software compatibility */ #define PS2_MOUSE_VENDOR 0x0002 #define PS2_MOUSE_GENERIC_PRODUCT 0x0001 #define PS2_MOUSE_SYNAPTICS_NAME "SynPS/2 Synaptics TouchPad" #define PS2_MOUSE_SYNAPTICS_PRODUCT 0x0007 #define PS2_MOUSE_TRACKPOINT_NAME "TPPS/2 IBM TrackPoint" #define PS2_MOUSE_TRACKPOINT_PRODUCT 0x000A #define PS2_MOUSE_ELANTECH_NAME "ETPS/2 Elantech Touchpad" #define PS2_MOUSE_ELANTECH_ST_NAME "ETPS/2 Elantech TrackPoint" #define PS2_MOUSE_ELANTECH_PRODUCT 0x000E #define ABSINFO_END { ABS_CNT, 0, 0, 0 } static void psm_support_abs_bulk(struct evdev_dev *evdev, const uint16_t info[][4]) { size_t i; for (i = 0; info[i][0] != ABS_CNT; i++) evdev_support_abs(evdev, info[i][0], 0, info[i][1], info[i][2], 0, 0, info[i][3]); } static void psm_push_mt_finger(struct psm_softc *sc, int id, const finger_t *f) { int y = sc->synhw.minimumYCoord + sc->synhw.maximumYCoord - f->y; evdev_push_abs(sc->evdev_a, ABS_MT_SLOT, id); evdev_push_abs(sc->evdev_a, ABS_MT_TRACKING_ID, id); evdev_push_abs(sc->evdev_a, ABS_MT_POSITION_X, f->x); evdev_push_abs(sc->evdev_a, ABS_MT_POSITION_Y, y); evdev_push_abs(sc->evdev_a, ABS_MT_PRESSURE, f->p); } static void psm_push_st_finger(struct psm_softc *sc, const finger_t *f) { int y = sc->synhw.minimumYCoord + sc->synhw.maximumYCoord - f->y; evdev_push_abs(sc->evdev_a, ABS_X, f->x); evdev_push_abs(sc->evdev_a, ABS_Y, y); evdev_push_abs(sc->evdev_a, ABS_PRESSURE, f->p); if (sc->synhw.capPalmDetect) evdev_push_abs(sc->evdev_a, ABS_TOOL_WIDTH, f->w); } static void psm_release_mt_slot(struct evdev_dev *evdev, int32_t slot) { evdev_push_abs(evdev, ABS_MT_SLOT, slot); evdev_push_abs(evdev, ABS_MT_TRACKING_ID, -1); } static int psm_register(device_t dev, int model_code) { struct psm_softc *sc = device_get_softc(dev); struct evdev_dev *evdev_r; int error, i, nbuttons, nwheels, product; bool is_pointing_stick; const char *name; name = model_name(model_code); nbuttons = sc->hw.buttons; product = PS2_MOUSE_GENERIC_PRODUCT; nwheels = 0; is_pointing_stick = false; switch (model_code) { case MOUSE_MODEL_TRACKPOINT: name = PS2_MOUSE_TRACKPOINT_NAME; product = PS2_MOUSE_TRACKPOINT_PRODUCT; nbuttons = 3; is_pointing_stick = true; break; case MOUSE_MODEL_ELANTECH: name = PS2_MOUSE_ELANTECH_ST_NAME; product = PS2_MOUSE_ELANTECH_PRODUCT; nbuttons = 3; is_pointing_stick = true; break; case MOUSE_MODEL_MOUSEMANPLUS: case MOUSE_MODEL_4D: nwheels = 2; break; case MOUSE_MODEL_EXPLORER: case MOUSE_MODEL_INTELLI: case MOUSE_MODEL_NET: case MOUSE_MODEL_NETSCROLL: case MOUSE_MODEL_4DPLUS: nwheels = 1; break; } evdev_r = evdev_alloc(); evdev_set_name(evdev_r, name); evdev_set_phys(evdev_r, device_get_nameunit(dev)); evdev_set_id(evdev_r, BUS_I8042, PS2_MOUSE_VENDOR, product, 0); evdev_set_methods(evdev_r, sc, &psm_ev_methods_r); evdev_support_prop(evdev_r, INPUT_PROP_POINTER); if (is_pointing_stick) evdev_support_prop(evdev_r, INPUT_PROP_POINTING_STICK); evdev_support_event(evdev_r, EV_SYN); 
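psm_register() above declares every capability of the relative device before registering it. As a compact variant, this is how the same evdev calls would describe a hypothetical five-button wheel mouse inside this driver (evdev_set_id() and evdev_set_methods() omitted for brevity; a sketch, not the driver's code):

static int
demo_register_wheel_mouse(device_t dev, struct psm_softc *sc)
{
	struct evdev_dev *evdev;
	int error, i;

	evdev = evdev_alloc();
	evdev_set_name(evdev, "Demo PS/2 wheel mouse");
	evdev_set_phys(evdev, device_get_nameunit(dev));
	evdev_support_prop(evdev, INPUT_PROP_POINTER);
	evdev_support_event(evdev, EV_SYN);
	evdev_support_event(evdev, EV_KEY);
	evdev_support_event(evdev, EV_REL);
	evdev_support_rel(evdev, REL_X);
	evdev_support_rel(evdev, REL_Y);
	evdev_support_rel(evdev, REL_WHEEL);
	for (i = 0; i < 5; i++)
		evdev_support_key(evdev, BTN_MOUSE + i);
	error = evdev_register_mtx(evdev, &Giant);
	if (error)
		evdev_free(evdev);
	return (error);
}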
evdev_support_event(evdev_r, EV_KEY); evdev_support_event(evdev_r, EV_REL); evdev_support_rel(evdev_r, REL_X); evdev_support_rel(evdev_r, REL_Y); switch (nwheels) { case 2: evdev_support_rel(evdev_r, REL_HWHEEL); /* FALLTHROUGH */ case 1: evdev_support_rel(evdev_r, REL_WHEEL); } for (i = 0; i < nbuttons; i++) evdev_support_key(evdev_r, BTN_MOUSE + i); error = evdev_register_mtx(evdev_r, &Giant); if (error) evdev_free(evdev_r); else sc->evdev_r = evdev_r; return (error); } static int psm_register_synaptics(device_t dev) { struct psm_softc *sc = device_get_softc(dev); const uint16_t synaptics_absinfo_st[][4] = { { ABS_X, sc->synhw.minimumXCoord, sc->synhw.maximumXCoord, sc->synhw.infoXupmm }, { ABS_Y, sc->synhw.minimumYCoord, sc->synhw.maximumYCoord, sc->synhw.infoYupmm }, { ABS_PRESSURE, 0, ELANTECH_FINGER_MAX_P, 0 }, ABSINFO_END, }; const uint16_t synaptics_absinfo_mt[][4] = { { ABS_MT_SLOT, 0, PSM_FINGERS-1, 0}, { ABS_MT_TRACKING_ID, -1, PSM_FINGERS-1, 0}, { ABS_MT_POSITION_X, sc->synhw.minimumXCoord, sc->synhw.maximumXCoord, sc->synhw.infoXupmm }, { ABS_MT_POSITION_Y, sc->synhw.minimumYCoord, sc->synhw.maximumYCoord, sc->synhw.infoYupmm }, { ABS_MT_PRESSURE, 0, ELANTECH_FINGER_MAX_P, 0 }, ABSINFO_END, }; struct evdev_dev *evdev_a; int error, i, guest_model; evdev_a = evdev_alloc(); evdev_set_name(evdev_a, PS2_MOUSE_SYNAPTICS_NAME); evdev_set_phys(evdev_a, device_get_nameunit(dev)); evdev_set_id(evdev_a, BUS_I8042, PS2_MOUSE_VENDOR, PS2_MOUSE_SYNAPTICS_PRODUCT, 0); evdev_set_methods(evdev_a, sc, &psm_ev_methods_a); evdev_support_event(evdev_a, EV_SYN); evdev_support_event(evdev_a, EV_KEY); evdev_support_event(evdev_a, EV_ABS); evdev_support_prop(evdev_a, INPUT_PROP_POINTER); if (sc->synhw.capAdvancedGestures) evdev_support_prop(evdev_a, INPUT_PROP_SEMI_MT); if (sc->synhw.capClickPad) evdev_support_prop(evdev_a, INPUT_PROP_BUTTONPAD); evdev_support_key(evdev_a, BTN_TOUCH); evdev_support_nfingers(evdev_a, 3); psm_support_abs_bulk(evdev_a, synaptics_absinfo_st); if (sc->synhw.capAdvancedGestures || sc->synhw.capReportsV) psm_support_abs_bulk(evdev_a, synaptics_absinfo_mt); if (sc->synhw.capPalmDetect) evdev_support_abs(evdev_a, ABS_TOOL_WIDTH, 0, 0, 15, 0, 0, 0); evdev_support_key(evdev_a, BTN_LEFT); if (!sc->synhw.capClickPad) { evdev_support_key(evdev_a, BTN_RIGHT); if (sc->synhw.capExtended && sc->synhw.capMiddle) evdev_support_key(evdev_a, BTN_MIDDLE); } if (sc->synhw.capExtended && sc->synhw.capFourButtons) { evdev_support_key(evdev_a, BTN_BACK); evdev_support_key(evdev_a, BTN_FORWARD); } if (sc->synhw.capExtended && (sc->synhw.nExtendedButtons > 0)) for (i = 0; i < sc->synhw.nExtendedButtons; i++) evdev_support_key(evdev_a, BTN_0 + i); error = evdev_register_mtx(evdev_a, &Giant); - if (!error && sc->synhw.capPassthrough) { + if (!error && (sc->synhw.capPassthrough || sc->muxport != PSM_NOMUX)) { guest_model = sc->tpinfo.sysctl_tree != NULL ? 
MOUSE_MODEL_TRACKPOINT : MOUSE_MODEL_GENERIC; error = psm_register(dev, guest_model); } if (error) evdev_free(evdev_a); else sc->evdev_a = evdev_a; return (error); } static int psm_register_elantech(device_t dev) { struct psm_softc *sc = device_get_softc(dev); const uint16_t elantech_absinfo[][4] = { { ABS_X, 0, sc->elanhw.sizex, sc->elanhw.dpmmx }, { ABS_Y, 0, sc->elanhw.sizey, sc->elanhw.dpmmy }, { ABS_PRESSURE, 0, ELANTECH_FINGER_MAX_P, 0 }, { ABS_TOOL_WIDTH, 0, ELANTECH_FINGER_MAX_W, 0 }, { ABS_MT_SLOT, 0, ELANTECH_MAX_FINGERS - 1, 0 }, { ABS_MT_TRACKING_ID, -1, ELANTECH_MAX_FINGERS - 1, 0 }, { ABS_MT_POSITION_X, 0, sc->elanhw.sizex, sc->elanhw.dpmmx }, { ABS_MT_POSITION_Y, 0, sc->elanhw.sizey, sc->elanhw.dpmmy }, { ABS_MT_PRESSURE, 0, ELANTECH_FINGER_MAX_P, 0 }, { ABS_MT_TOUCH_MAJOR, 0, ELANTECH_FINGER_MAX_W * sc->elanhw.dptracex, 0 }, ABSINFO_END, }; struct evdev_dev *evdev_a; int error; evdev_a = evdev_alloc(); evdev_set_name(evdev_a, PS2_MOUSE_ELANTECH_NAME); evdev_set_phys(evdev_a, device_get_nameunit(dev)); evdev_set_id(evdev_a, BUS_I8042, PS2_MOUSE_VENDOR, PS2_MOUSE_ELANTECH_PRODUCT, 0); evdev_set_methods(evdev_a, sc, &psm_ev_methods_a); evdev_support_event(evdev_a, EV_SYN); evdev_support_event(evdev_a, EV_KEY); evdev_support_event(evdev_a, EV_ABS); evdev_support_prop(evdev_a, INPUT_PROP_POINTER); if (sc->elanhw.issemimt) evdev_support_prop(evdev_a, INPUT_PROP_SEMI_MT); if (sc->elanhw.isclickpad) evdev_support_prop(evdev_a, INPUT_PROP_BUTTONPAD); evdev_support_key(evdev_a, BTN_TOUCH); evdev_support_nfingers(evdev_a, ELANTECH_MAX_FINGERS); evdev_support_key(evdev_a, BTN_LEFT); if (!sc->elanhw.isclickpad) evdev_support_key(evdev_a, BTN_RIGHT); psm_support_abs_bulk(evdev_a, elantech_absinfo); error = evdev_register_mtx(evdev_a, &Giant); if (!error && sc->elanhw.hastrackpoint) error = psm_register(dev, MOUSE_MODEL_ELANTECH); if (error) evdev_free(evdev_a); else sc->evdev_a = evdev_a; return (error); } #endif static int psmattach(device_t dev) { int unit = device_get_unit(dev); struct psm_softc *sc = device_get_softc(dev); int error; int rid; /* Setup initial state */ sc->state = PSM_VALID; callout_init(&sc->callout, 0); callout_init(&sc->softcallout, 0); /* Setup our interrupt handler */ rid = KBDC_RID_AUX; sc->intr = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->intr == NULL) return (ENXIO); error = bus_setup_intr(dev, sc->intr, INTR_TYPE_TTY, NULL, psmintr, sc, &sc->ih); if (error) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->intr); return (error); } /* Done */ sc->dev = make_dev(&psm_cdevsw, 0, 0, 0, 0666, "psm%d", unit); sc->dev->si_drv1 = sc; sc->bdev = make_dev(&psm_cdevsw, 0, 0, 0, 0666, "bpsm%d", unit); sc->bdev->si_drv1 = sc; #ifdef EVDEV_SUPPORT switch (sc->hw.model) { case MOUSE_MODEL_SYNAPTICS: error = psm_register_synaptics(dev); break; case MOUSE_MODEL_ELANTECH: error = psm_register_elantech(dev); break; default: error = psm_register(dev, sc->hw.model); } if (error) return (error); #endif /* Some touchpad devices need full reinitialization after suspend. 
*/ switch (sc->hw.model) { case MOUSE_MODEL_SYNAPTICS: case MOUSE_MODEL_GLIDEPOINT: case MOUSE_MODEL_VERSAPAD: case MOUSE_MODEL_ELANTECH: sc->config |= PSM_CONFIG_INITAFTERSUSPEND; break; default: if (sc->synhw.infoMajor >= 4 || sc->tphw > 0) sc->config |= PSM_CONFIG_INITAFTERSUSPEND; break; } /* Elantech trackpad`s sync bit differs from touchpad`s one */ if (sc->hw.model == MOUSE_MODEL_ELANTECH && (sc->elanhw.hascrc || sc->elanhw.hastrackpoint)) { sc->config |= PSM_CONFIG_NOCHECKSYNC; sc->flags &= ~PSM_NEED_SYNCBITS; } if (!verbose) printf("psm%d: model %s, device ID %d\n", unit, model_name(sc->hw.model), sc->hw.hwid & 0x00ff); else { printf("psm%d: model %s, device ID %d-%02x, %d buttons\n", unit, model_name(sc->hw.model), sc->hw.hwid & 0x00ff, sc->hw.hwid >> 8, sc->hw.buttons); printf("psm%d: config:%08x, flags:%08x, packet size:%d\n", unit, sc->config, sc->flags, sc->mode.packetsize); printf("psm%d: syncmask:%02x, syncbits:%02x%s\n", unit, sc->mode.syncmask[0], sc->mode.syncmask[1], sc->config & PSM_CONFIG_NOCHECKSYNC ? " (sync not checked)" : ""); } if (bootverbose) --verbose; return (0); } static int psmdetach(device_t dev) { struct psm_softc *sc; int rid; sc = device_get_softc(dev); if (sc->state & PSM_OPEN) return (EBUSY); #ifdef EVDEV_SUPPORT evdev_free(sc->evdev_r); evdev_free(sc->evdev_a); #endif rid = KBDC_RID_AUX; bus_teardown_intr(dev, sc->intr, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, rid, sc->intr); destroy_dev(sc->dev); destroy_dev(sc->bdev); callout_drain(&sc->callout); callout_drain(&sc->softcallout); return (0); } #ifdef EVDEV_SUPPORT static int psm_ev_open_r(struct evdev_dev *evdev) { struct psm_softc *sc = evdev_get_softc(evdev); int err = 0; /* Get device data */ if ((sc->state & PSM_VALID) == 0) { /* the device is no longer valid/functioning */ return (ENXIO); } if (!(sc->state & (PSM_OPEN | PSM_EV_OPEN_A))) err = psmopen(sc); if (err == 0) sc->state |= PSM_EV_OPEN_R; return (err); } static int psm_ev_close_r(struct evdev_dev *evdev) { struct psm_softc *sc = evdev_get_softc(evdev); int err = 0; sc->state &= ~PSM_EV_OPEN_R; if (sc->state & (PSM_OPEN | PSM_EV_OPEN_A)) return (0); if (sc->state & PSM_VALID) err = psmclose(sc); return (err); } static int psm_ev_open_a(struct evdev_dev *evdev) { struct psm_softc *sc = evdev_get_softc(evdev); int err = 0; /* Get device data */ if ((sc->state & PSM_VALID) == 0) { /* the device is no longer valid/functioning */ return (ENXIO); } if (!(sc->state & (PSM_OPEN | PSM_EV_OPEN_R))) err = psmopen(sc); if (err == 0) sc->state |= PSM_EV_OPEN_A; return (err); } static int psm_ev_close_a(struct evdev_dev *evdev) { struct psm_softc *sc = evdev_get_softc(evdev); int err = 0; sc->state &= ~PSM_EV_OPEN_A; if (sc->state & (PSM_OPEN | PSM_EV_OPEN_R)) return (0); if (sc->state & PSM_VALID) err = psmclose(sc); return (err); } #endif static int psm_cdev_open(struct cdev *dev, int flag, int fmt, struct thread *td) { struct psm_softc *sc; int err = 0; /* Get device data */ sc = dev->si_drv1; if ((sc == NULL) || (sc->state & PSM_VALID) == 0) { /* the device is no longer valid/functioning */ return (ENXIO); } /* Disallow multiple opens */ if (sc->state & PSM_OPEN) return (EBUSY); device_busy(devclass_get_device(psm_devclass, sc->unit)); #ifdef EVDEV_SUPPORT /* Already opened by evdev */ if (!(sc->state & (PSM_EV_OPEN_R | PSM_EV_OPEN_A))) #endif err = psmopen(sc); if (err == 0) sc->state |= PSM_OPEN; else device_unbusy(devclass_get_device(psm_devclass, sc->unit)); return (err); } static int psm_cdev_close(struct cdev *dev, int flag, int fmt, 
    struct thread *td)
{
	struct psm_softc *sc;
	int err = 0;

	/* Get device data */
	sc = dev->si_drv1;
	if ((sc == NULL) || (sc->state & PSM_VALID) == 0) {
		/* the device is no longer valid/functioning */
		return (ENXIO);
	}

#ifdef EVDEV_SUPPORT
	/* Still opened by evdev */
	if (!(sc->state & (PSM_EV_OPEN_R | PSM_EV_OPEN_A)))
#endif
		err = psmclose(sc);

	if (err == 0) {
		sc->state &= ~PSM_OPEN;
		/* clean up any sigio requests */
		if (sc->async != NULL) {
			funsetown(&sc->async);
			sc->async = NULL;
		}
		device_unbusy(devclass_get_device(psm_devclass, sc->unit));
	}

	return (err);
}

static int
psmopen(struct psm_softc *sc)
{
	int command_byte;
	int err;
	int s;

	/* Initialize state */
	sc->mode.level = sc->dflt_mode.level;
	sc->mode.protocol = sc->dflt_mode.protocol;
	sc->watchdog = FALSE;
	sc->async = NULL;

	/* flush the event queue */
	sc->queue.count = 0;
	sc->queue.head = 0;
	sc->queue.tail = 0;
	sc->status.flags = 0;
	sc->status.button = 0;
	sc->status.obutton = 0;
	sc->status.dx = 0;
	sc->status.dy = 0;
	sc->status.dz = 0;
	sc->button = 0;
	sc->pqueue_start = 0;
	sc->pqueue_end = 0;

	/* empty input buffer */
	flushpackets(sc);
	sc->syncerrors = 0;
	sc->pkterrors = 0;

	/* don't let timeout routines in the keyboard driver poll the kbdc */
	if (!kbdc_lock(sc->kbdc, TRUE))
		return (EIO);

	/* save the current controller command byte */
	s = spltty();
	command_byte = get_controller_command_byte(sc->kbdc);

	/* enable the aux port and temporarily disable the keyboard */
	if (command_byte == -1 || !set_controller_command_byte(sc->kbdc,
	    kbdc_get_device_mask(sc->kbdc),
	    KBD_DISABLE_KBD_PORT | KBD_DISABLE_KBD_INT |
	    KBD_ENABLE_AUX_PORT | KBD_DISABLE_AUX_INT)) {
		/* CONTROLLER ERROR; do you know how to get out of this? */
		kbdc_lock(sc->kbdc, FALSE);
		splx(s);
		log(LOG_ERR,
		    "psm%d: unable to set the command byte (psmopen).\n",
		    sc->unit);
		return (EIO);
	}

	/*
	 * Now that the keyboard controller is told not to generate
	 * the keyboard and mouse interrupts, call `splx()' to allow
	 * the other tty interrupts. The clock interrupt may also occur,
	 * but timeout routines will be blocked by the poll flag set
	 * via `kbdc_lock()'
	 */
	splx(s);

	/* enable the mouse device */
	err = doopen(sc, command_byte);

	/* done */
	kbdc_lock(sc->kbdc, FALSE);

	return (err);
}

static int
psmclose(struct psm_softc *sc)
{
	int stat[3];
	int command_byte;
	int s;

	/* don't let timeout routines in the keyboard driver poll the kbdc */
	if (!kbdc_lock(sc->kbdc, TRUE))
		return (EIO);

	/* save the current controller command byte */
	s = spltty();
	command_byte = get_controller_command_byte(sc->kbdc);
	if (command_byte == -1) {
		kbdc_lock(sc->kbdc, FALSE);
		splx(s);
		return (EIO);
	}

	/* disable the aux interrupt and temporarily disable the keyboard */
	if (!set_controller_command_byte(sc->kbdc,
	    kbdc_get_device_mask(sc->kbdc),
	    KBD_DISABLE_KBD_PORT | KBD_DISABLE_KBD_INT |
	    KBD_ENABLE_AUX_PORT | KBD_DISABLE_AUX_INT)) {
		log(LOG_ERR,
		    "psm%d: failed to disable the aux int (psmclose).\n",
		    sc->unit);
		/* CONTROLLER ERROR;
		 * NOTE: we shall force our way through, because the only
		 * ill effect we shall see is that we may not be able
		 * to read ACK from the mouse, and it doesn't matter much
		 * so long as the mouse will accept the DISABLE command.
		 */
	}
	splx(s);

	/* stop the watchdog timer */
	callout_stop(&sc->callout);

	/* remove anything left in the output buffer */
	empty_aux_buffer(sc->kbdc, 10);

	/* disable the aux device, port and interrupt */
	if (sc->state & PSM_VALID) {
		if (!disable_aux_dev(sc->kbdc)) {
			/* MOUSE ERROR;
			 * NOTE: we don't return (error) and continue,
			 * pretending we have successfully disabled the device.
* It's OK because the interrupt routine will discard * any data from the mouse hereafter. */ log(LOG_ERR, "psm%d: failed to disable the device (psmclose).\n", sc->unit); } if (get_mouse_status(sc->kbdc, stat, 0, 3) < 3) log(LOG_DEBUG, "psm%d: failed to get status (psmclose).\n", sc->unit); } if (!set_controller_command_byte(sc->kbdc, kbdc_get_device_mask(sc->kbdc), (command_byte & KBD_KBD_CONTROL_BITS) | KBD_DISABLE_AUX_PORT | KBD_DISABLE_AUX_INT)) { /* * CONTROLLER ERROR; * we shall ignore this error; see the above comment. */ log(LOG_ERR, "psm%d: failed to disable the aux port (psmclose).\n", sc->unit); } /* remove anything left in the output buffer */ empty_aux_buffer(sc->kbdc, 10); /* close is almost always successful */ kbdc_lock(sc->kbdc, FALSE); return (0); } static int tame_mouse(struct psm_softc *sc, packetbuf_t *pb, mousestatus_t *status, u_char *buf) { static u_char butmapps2[8] = { 0, MOUSE_PS2_BUTTON1DOWN, MOUSE_PS2_BUTTON2DOWN, MOUSE_PS2_BUTTON1DOWN | MOUSE_PS2_BUTTON2DOWN, MOUSE_PS2_BUTTON3DOWN, MOUSE_PS2_BUTTON1DOWN | MOUSE_PS2_BUTTON3DOWN, MOUSE_PS2_BUTTON2DOWN | MOUSE_PS2_BUTTON3DOWN, MOUSE_PS2_BUTTON1DOWN | MOUSE_PS2_BUTTON2DOWN | MOUSE_PS2_BUTTON3DOWN, }; static u_char butmapmsc[8] = { MOUSE_MSC_BUTTON1UP | MOUSE_MSC_BUTTON2UP | MOUSE_MSC_BUTTON3UP, MOUSE_MSC_BUTTON2UP | MOUSE_MSC_BUTTON3UP, MOUSE_MSC_BUTTON1UP | MOUSE_MSC_BUTTON3UP, MOUSE_MSC_BUTTON3UP, MOUSE_MSC_BUTTON1UP | MOUSE_MSC_BUTTON2UP, MOUSE_MSC_BUTTON2UP, MOUSE_MSC_BUTTON1UP, 0, }; int mapped; int i; if (sc->mode.level == PSM_LEVEL_BASE) { mapped = status->button & ~MOUSE_BUTTON4DOWN; if (status->button & MOUSE_BUTTON4DOWN) mapped |= MOUSE_BUTTON1DOWN; status->button = mapped; buf[0] = MOUSE_PS2_SYNC | butmapps2[mapped & MOUSE_STDBUTTONS]; i = imax(imin(status->dx, 255), -256); if (i < 0) buf[0] |= MOUSE_PS2_XNEG; buf[1] = i; i = imax(imin(status->dy, 255), -256); if (i < 0) buf[0] |= MOUSE_PS2_YNEG; buf[2] = i; return (MOUSE_PS2_PACKETSIZE); } else if (sc->mode.level == PSM_LEVEL_STANDARD) { buf[0] = MOUSE_MSC_SYNC | butmapmsc[status->button & MOUSE_STDBUTTONS]; i = imax(imin(status->dx, 255), -256); buf[1] = i >> 1; buf[3] = i - buf[1]; i = imax(imin(status->dy, 255), -256); buf[2] = i >> 1; buf[4] = i - buf[2]; i = imax(imin(status->dz, 127), -128); buf[5] = (i >> 1) & 0x7f; buf[6] = (i - (i >> 1)) & 0x7f; buf[7] = (~status->button >> 3) & 0x7f; return (MOUSE_SYS_PACKETSIZE); } return (pb->inputbytes); } static int psmread(struct cdev *dev, struct uio *uio, int flag) { struct psm_softc *sc = dev->si_drv1; u_char buf[PSM_SMALLBUFSIZE]; int error = 0; int s; int l; if ((sc->state & PSM_VALID) == 0) return (EIO); /* block until mouse activity occurred */ s = spltty(); while (sc->queue.count <= 0) { if (dev != sc->bdev) { splx(s); return (EWOULDBLOCK); } sc->state |= PSM_ASLP; error = tsleep(sc, PZERO | PCATCH, "psmrea", 0); sc->state &= ~PSM_ASLP; if (error) { splx(s); return (error); } else if ((sc->state & PSM_VALID) == 0) { /* the device disappeared! 
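tame_mouse() above encodes 9-bit signed deltas into the base-level PS/2 packet: the low eight bits go into bytes 1 and 2, and the sign flags into byte 0 alongside the button bits. The decoding counterpart, with a made-up sample packet (standard PS/2 layout: buttons in bits 0-2, X/Y sign in bits 4 and 5):

#include <stdio.h>

int
main(void)
{
	unsigned char pkt[3] = { 0x19, 0xf4, 0x0a };	/* sample packet */
	int dx, dy;

	dx = pkt[1] - ((pkt[0] & 0x10) ? 256 : 0);	/* X sign bit */
	dy = pkt[2] - ((pkt[0] & 0x20) ? 256 : 0);	/* Y sign bit */
	printf("buttons=%#x dx=%d dy=%d\n", pkt[0] & 0x07, dx, dy);
	return (0);	/* prints buttons=0x1 dx=-12 dy=10 */
}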
*/ splx(s); return (EIO); } } splx(s); /* copy data to the user land */ while ((sc->queue.count > 0) && (uio->uio_resid > 0)) { s = spltty(); l = imin(sc->queue.count, uio->uio_resid); if (l > sizeof(buf)) l = sizeof(buf); if (l > sizeof(sc->queue.buf) - sc->queue.head) { bcopy(&sc->queue.buf[sc->queue.head], &buf[0], sizeof(sc->queue.buf) - sc->queue.head); bcopy(&sc->queue.buf[0], &buf[sizeof(sc->queue.buf) - sc->queue.head], l - (sizeof(sc->queue.buf) - sc->queue.head)); } else bcopy(&sc->queue.buf[sc->queue.head], &buf[0], l); sc->queue.count -= l; sc->queue.head = (sc->queue.head + l) % sizeof(sc->queue.buf); splx(s); error = uiomove(buf, l, uio); if (error) break; } return (error); } static int block_mouse_data(struct psm_softc *sc, int *c) { int s; if (!kbdc_lock(sc->kbdc, TRUE)) return (EIO); s = spltty(); *c = get_controller_command_byte(sc->kbdc); if ((*c == -1) || !set_controller_command_byte(sc->kbdc, kbdc_get_device_mask(sc->kbdc), KBD_DISABLE_KBD_PORT | KBD_DISABLE_KBD_INT | KBD_ENABLE_AUX_PORT | KBD_DISABLE_AUX_INT)) { /* this is CONTROLLER ERROR */ splx(s); kbdc_lock(sc->kbdc, FALSE); return (EIO); } /* * The device may be in the middle of status data transmission. * The transmission will be interrupted, thus, incomplete status * data must be discarded. Although the aux interrupt is disabled * at the keyboard controller level, at most one aux interrupt * may have already been pending and a data byte is in the * output buffer; throw it away. Note that the second argument * to `empty_aux_buffer()' is zero, so that the call will just * flush the internal queue. * `psmintr()' will be invoked after `splx()' if an interrupt is * pending; it will see no data and returns immediately. */ empty_aux_buffer(sc->kbdc, 0); /* flush the queue */ read_aux_data_no_wait(sc->kbdc); /* throw away data if any */ flushpackets(sc); splx(s); return (0); } static void dropqueue(struct psm_softc *sc) { sc->queue.count = 0; sc->queue.head = 0; sc->queue.tail = 0; if ((sc->state & PSM_SOFTARMED) != 0) { sc->state &= ~PSM_SOFTARMED; callout_stop(&sc->softcallout); } sc->pqueue_start = sc->pqueue_end; } static void flushpackets(struct psm_softc *sc) { dropqueue(sc); bzero(&sc->pqueue, sizeof(sc->pqueue)); } static int unblock_mouse_data(struct psm_softc *sc, int c) { int error = 0; /* * We may have seen a part of status data during `set_mouse_XXX()'. * they have been queued; flush it. */ empty_aux_buffer(sc->kbdc, 0); /* restore ports and interrupt */ if (!set_controller_command_byte(sc->kbdc, kbdc_get_device_mask(sc->kbdc), c & (KBD_KBD_CONTROL_BITS | KBD_AUX_CONTROL_BITS))) { /* * CONTROLLER ERROR; this is serious, we may have * been left with the inaccessible keyboard and * the disabled mouse interrupt. 
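The double bcopy() in psmread() above handles the ring-buffer wraparound: when the requested span crosses the end of the backing array, it is copied in two pieces. Reduced to a standalone sketch (buffer contents and sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define RINGSZ 8

int
main(void)
{
	char ring[RINGSZ + 1] = "EFGHABCD";	/* head points at 'A' */
	size_t head = 4, count = 6;
	char out[RINGSZ + 1] = { 0 };

	if (count > RINGSZ - head) {
		/* tail piece up to the end of the array... */
		memcpy(out, &ring[head], RINGSZ - head);
		/* ...then the remainder from the start */
		memcpy(out + (RINGSZ - head), &ring[0],
		    count - (RINGSZ - head));
	} else
		memcpy(out, &ring[head], count);
	printf("%s\n", out);	/* ABCDEF */
	return (0);
}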
*/ error = EIO; } kbdc_lock(sc->kbdc, FALSE); return (error); } static int psmwrite(struct cdev *dev, struct uio *uio, int flag) { struct psm_softc *sc = dev->si_drv1; u_char buf[PSM_SMALLBUFSIZE]; int error = 0, i, l; if ((sc->state & PSM_VALID) == 0) return (EIO); if (sc->mode.level < PSM_LEVEL_NATIVE) return (ENODEV); /* copy data from the user land */ while (uio->uio_resid > 0) { l = imin(PSM_SMALLBUFSIZE, uio->uio_resid); error = uiomove(buf, l, uio); if (error) break; for (i = 0; i < l; i++) { VLOG(4, (LOG_DEBUG, "psm: cmd 0x%x\n", buf[i])); if (!write_aux_command(sc->kbdc, buf[i])) { VLOG(2, (LOG_DEBUG, "psm: cmd 0x%x failed.\n", buf[i])); return (reinitialize(sc, FALSE)); } } } return (error); } static int psmioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct psm_softc *sc = dev->si_drv1; mousemode_t mode; mousestatus_t status; mousedata_t *data; int stat[3]; int command_byte; int error = 0; int s; /* Perform IOCTL command */ switch (cmd) { case OLD_MOUSE_GETHWINFO: s = spltty(); ((old_mousehw_t *)addr)->buttons = sc->hw.buttons; ((old_mousehw_t *)addr)->iftype = sc->hw.iftype; ((old_mousehw_t *)addr)->type = sc->hw.type; ((old_mousehw_t *)addr)->hwid = sc->hw.hwid & 0x00ff; splx(s); break; case MOUSE_GETHWINFO: s = spltty(); *(mousehw_t *)addr = sc->hw; if (sc->mode.level == PSM_LEVEL_BASE) ((mousehw_t *)addr)->model = MOUSE_MODEL_GENERIC; splx(s); break; case MOUSE_SYN_GETHWINFO: s = spltty(); if (sc->synhw.infoMajor >= 4) *(synapticshw_t *)addr = sc->synhw; else error = EINVAL; splx(s); break; case OLD_MOUSE_GETMODE: s = spltty(); switch (sc->mode.level) { case PSM_LEVEL_BASE: ((old_mousemode_t *)addr)->protocol = MOUSE_PROTO_PS2; break; case PSM_LEVEL_STANDARD: ((old_mousemode_t *)addr)->protocol = MOUSE_PROTO_SYSMOUSE; break; case PSM_LEVEL_NATIVE: ((old_mousemode_t *)addr)->protocol = MOUSE_PROTO_PS2; break; } ((old_mousemode_t *)addr)->rate = sc->mode.rate; ((old_mousemode_t *)addr)->resolution = sc->mode.resolution; ((old_mousemode_t *)addr)->accelfactor = sc->mode.accelfactor; splx(s); break; case MOUSE_GETMODE: s = spltty(); *(mousemode_t *)addr = sc->mode; if ((sc->flags & PSM_NEED_SYNCBITS) != 0) { ((mousemode_t *)addr)->syncmask[0] = 0; ((mousemode_t *)addr)->syncmask[1] = 0; } ((mousemode_t *)addr)->resolution = MOUSE_RES_LOW - sc->mode.resolution; switch (sc->mode.level) { case PSM_LEVEL_BASE: ((mousemode_t *)addr)->protocol = MOUSE_PROTO_PS2; ((mousemode_t *)addr)->packetsize = MOUSE_PS2_PACKETSIZE; break; case PSM_LEVEL_STANDARD: ((mousemode_t *)addr)->protocol = MOUSE_PROTO_SYSMOUSE; ((mousemode_t *)addr)->packetsize = MOUSE_SYS_PACKETSIZE; ((mousemode_t *)addr)->syncmask[0] = MOUSE_SYS_SYNCMASK; ((mousemode_t *)addr)->syncmask[1] = MOUSE_SYS_SYNC; break; case PSM_LEVEL_NATIVE: /* FIXME: this isn't quite correct... XXX */ ((mousemode_t *)addr)->protocol = MOUSE_PROTO_PS2; break; } splx(s); break; case OLD_MOUSE_SETMODE: case MOUSE_SETMODE: if (cmd == OLD_MOUSE_SETMODE) { mode.rate = ((old_mousemode_t *)addr)->rate; /* * resolution old I/F new I/F * default 0 0 * low 1 -2 * medium low 2 -3 * medium high 3 -4 * high 4 -5 */ if (((old_mousemode_t *)addr)->resolution > 0) mode.resolution = -((old_mousemode_t *)addr)->resolution - 1; else mode.resolution = 0; mode.accelfactor = ((old_mousemode_t *)addr)->accelfactor; mode.level = -1; } else mode = *(mousemode_t *)addr; /* adjust and validate parameters. 
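The old-to-new interface translation tabulated in the comment above reduces to one expression: a positive old-interface resolution value v maps to the new interface's -(v) - 1, and 0 stays 0. For illustration:

#include <stdio.h>

int
main(void)
{
	int old;

	/* default 0 -> 0, low 1 -> -2, ..., high 4 -> -5 */
	for (old = 0; old <= 4; old++)
		printf("old %d -> new %d\n", old, old > 0 ? -old - 1 : 0);
	return (0);
}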
*/ if (mode.rate > UCHAR_MAX) return (EINVAL); if (mode.rate == 0) mode.rate = sc->dflt_mode.rate; else if (mode.rate == -1) /* don't change the current setting */ ; else if (mode.rate < 0) return (EINVAL); if (mode.resolution >= UCHAR_MAX) return (EINVAL); if (mode.resolution >= 200) mode.resolution = MOUSE_RES_HIGH; else if (mode.resolution >= 100) mode.resolution = MOUSE_RES_MEDIUMHIGH; else if (mode.resolution >= 50) mode.resolution = MOUSE_RES_MEDIUMLOW; else if (mode.resolution > 0) mode.resolution = MOUSE_RES_LOW; if (mode.resolution == MOUSE_RES_DEFAULT) mode.resolution = sc->dflt_mode.resolution; else if (mode.resolution == -1) /* don't change the current setting */ ; else if (mode.resolution < 0) /* MOUSE_RES_LOW/MEDIUM/HIGH */ mode.resolution = MOUSE_RES_LOW - mode.resolution; if (mode.level == -1) /* don't change the current setting */ mode.level = sc->mode.level; else if ((mode.level < PSM_LEVEL_MIN) || (mode.level > PSM_LEVEL_MAX)) return (EINVAL); if (mode.accelfactor == -1) /* don't change the current setting */ mode.accelfactor = sc->mode.accelfactor; else if (mode.accelfactor < 0) return (EINVAL); /* don't allow anybody to poll the keyboard controller */ error = block_mouse_data(sc, &command_byte); if (error) return (error); /* set mouse parameters */ if (mode.rate > 0) mode.rate = set_mouse_sampling_rate(sc->kbdc, mode.rate); if (mode.resolution >= 0) mode.resolution = set_mouse_resolution(sc->kbdc, mode.resolution); set_mouse_scaling(sc->kbdc, 1); get_mouse_status(sc->kbdc, stat, 0, 3); s = spltty(); sc->mode.rate = mode.rate; sc->mode.resolution = mode.resolution; sc->mode.accelfactor = mode.accelfactor; sc->mode.level = mode.level; splx(s); unblock_mouse_data(sc, command_byte); break; case MOUSE_GETLEVEL: *(int *)addr = sc->mode.level; break; case MOUSE_SETLEVEL: if ((*(int *)addr < PSM_LEVEL_MIN) || (*(int *)addr > PSM_LEVEL_MAX)) return (EINVAL); sc->mode.level = *(int *)addr; break; case MOUSE_GETSTATUS: s = spltty(); status = sc->status; sc->status.flags = 0; sc->status.obutton = sc->status.button; sc->status.button = 0; sc->status.dx = 0; sc->status.dy = 0; sc->status.dz = 0; splx(s); *(mousestatus_t *)addr = status; break; case MOUSE_READSTATE: case MOUSE_READDATA: data = (mousedata_t *)addr; if (data->len > sizeof(data->buf)/sizeof(data->buf[0])) return (EINVAL); error = block_mouse_data(sc, &command_byte); if (error) return (error); if ((data->len = get_mouse_status(sc->kbdc, data->buf, (cmd == MOUSE_READDATA) ? 
		    1 : 0, data->len)) <= 0)
			error = EIO;
		unblock_mouse_data(sc, command_byte);
		break;

#if (defined(MOUSE_SETRESOLUTION))
	case MOUSE_SETRESOLUTION:
		mode.resolution = *(int *)addr;
		if (mode.resolution >= UCHAR_MAX)
			return (EINVAL);
		else if (mode.resolution >= 200)
			mode.resolution = MOUSE_RES_HIGH;
		else if (mode.resolution >= 100)
			mode.resolution = MOUSE_RES_MEDIUMHIGH;
		else if (mode.resolution >= 50)
			mode.resolution = MOUSE_RES_MEDIUMLOW;
		else if (mode.resolution > 0)
			mode.resolution = MOUSE_RES_LOW;
		if (mode.resolution == MOUSE_RES_DEFAULT)
			mode.resolution = sc->dflt_mode.resolution;
		else if (mode.resolution == -1)
			mode.resolution = sc->mode.resolution;
		else if (mode.resolution < 0)	/* MOUSE_RES_LOW/MEDIUM/HIGH */
			mode.resolution = MOUSE_RES_LOW - mode.resolution;

		error = block_mouse_data(sc, &command_byte);
		if (error)
			return (error);

		sc->mode.resolution =
		    set_mouse_resolution(sc->kbdc, mode.resolution);
		if (sc->mode.resolution != mode.resolution)
			error = EIO;
		unblock_mouse_data(sc, command_byte);
		break;
#endif /* MOUSE_SETRESOLUTION */

#if (defined(MOUSE_SETRATE))
	case MOUSE_SETRATE:
		mode.rate = *(int *)addr;
		if (mode.rate > UCHAR_MAX)
			return (EINVAL);
		if (mode.rate == 0)
			mode.rate = sc->dflt_mode.rate;
		else if (mode.rate < 0)
			mode.rate = sc->mode.rate;

		error = block_mouse_data(sc, &command_byte);
		if (error)
			return (error);

		sc->mode.rate = set_mouse_sampling_rate(sc->kbdc, mode.rate);
		if (sc->mode.rate != mode.rate)
			error = EIO;
		unblock_mouse_data(sc, command_byte);
		break;
#endif /* MOUSE_SETRATE */

#if (defined(MOUSE_SETSCALING))
	case MOUSE_SETSCALING:
		if ((*(int *)addr <= 0) || (*(int *)addr > 2))
			return (EINVAL);

		error = block_mouse_data(sc, &command_byte);
		if (error)
			return (error);

		if (!set_mouse_scaling(sc->kbdc, *(int *)addr))
			error = EIO;
		unblock_mouse_data(sc, command_byte);
		break;
#endif /* MOUSE_SETSCALING */

#if (defined(MOUSE_GETHWID))
	case MOUSE_GETHWID:
		error = block_mouse_data(sc, &command_byte);
		if (error)
			return (error);

		sc->hw.hwid &= ~0x00ff;
		sc->hw.hwid |= get_aux_id(sc->kbdc);
		*(int *)addr = sc->hw.hwid & 0x00ff;
		unblock_mouse_data(sc, command_byte);
		break;
#endif /* MOUSE_GETHWID */

	case FIONBIO:
	case FIOASYNC:
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)addr, &sc->async);
		break;
	case FIOGETOWN:
		*(int *)addr = fgetown(&sc->async);
		break;
	default:
		return (ENOTTY);
	}

	return (error);
}

static void
psmtimeout(void *arg)
{
	struct psm_softc *sc;
	int s;

	sc = (struct psm_softc *)arg;
	s = spltty();
	if (sc->watchdog && kbdc_lock(sc->kbdc, TRUE)) {
		VLOG(6, (LOG_DEBUG, "psm%d: lost interrupt?\n", sc->unit));
		psmintr(sc);
		kbdc_lock(sc->kbdc, FALSE);
	}
	sc->watchdog = TRUE;
	splx(s);
	callout_reset(&sc->callout, hz, psmtimeout, sc);
}

/* Add all sysctls under the debug.psm and hw.psm nodes */
static SYSCTL_NODE(_debug, OID_AUTO, psm, CTLFLAG_RD, 0, "ps/2 mouse");
static SYSCTL_NODE(_hw, OID_AUTO, psm, CTLFLAG_RD, 0, "ps/2 mouse");

SYSCTL_INT(_debug_psm, OID_AUTO, loglevel, CTLFLAG_RWTUN, &verbose, 0,
    "Verbosity level");

static int psmhz = 20;
SYSCTL_INT(_debug_psm, OID_AUTO, hz, CTLFLAG_RW, &psmhz, 0,
    "Frequency of the softcallout (in hz)");
static int psmerrsecs = 2;
SYSCTL_INT(_debug_psm, OID_AUTO, errsecs, CTLFLAG_RW, &psmerrsecs, 0,
    "Number of seconds during which packets will be dropped after a sync error");
static int psmerrusecs = 0;
SYSCTL_INT(_debug_psm, OID_AUTO, errusecs, CTLFLAG_RW, &psmerrusecs, 0,
    "Microseconds to add to psmerrsecs");
static int psmsecs = 0;
SYSCTL_INT(_debug_psm, OID_AUTO, secs, CTLFLAG_RW, &psmsecs, 0,
    "Max number of seconds between soft interrupts");
static int psmusecs = 500000;
SYSCTL_INT(_debug_psm, OID_AUTO, usecs, CTLFLAG_RW, &psmusecs, 0,
    "Microseconds to add to psmsecs");
static int pkterrthresh = 2;
SYSCTL_INT(_debug_psm, OID_AUTO, pkterrthresh, CTLFLAG_RW, &pkterrthresh, 0,
    "Number of error packets allowed before reinitializing the mouse");

SYSCTL_INT(_hw_psm, OID_AUTO, tap_enabled, CTLFLAG_RWTUN, &tap_enabled, 0,
    "Enable tap and drag gestures");
static int tap_threshold = PSM_TAP_THRESHOLD;
SYSCTL_INT(_hw_psm, OID_AUTO, tap_threshold, CTLFLAG_RW, &tap_threshold, 0,
    "Button tap threshold");
static int tap_timeout = PSM_TAP_TIMEOUT;
SYSCTL_INT(_hw_psm, OID_AUTO, tap_timeout, CTLFLAG_RW, &tap_timeout, 0,
    "Tap timeout for touchpads");

/* Tunables */
SYSCTL_INT(_hw_psm, OID_AUTO, synaptics_support, CTLFLAG_RDTUN,
    &synaptics_support, 0, "Enable support for Synaptics touchpads");

SYSCTL_INT(_hw_psm, OID_AUTO, trackpoint_support, CTLFLAG_RDTUN,
    &trackpoint_support, 0, "Enable support for IBM/Lenovo TrackPoint");

SYSCTL_INT(_hw_psm, OID_AUTO, elantech_support, CTLFLAG_RDTUN,
    &elantech_support, 0, "Enable support for Elantech touchpads");
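/*
 * Illustrative sketch, not part of this change: the OIDs declared above
 * are reachable through the ordinary sysctl(3) interface, so a small
 * userland tool can inspect or retune the driver at run time.  Error
 * handling is minimal and the chosen value is only an example.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int hz, newhz = 40;
	size_t len = sizeof(hz);

	/* Read the current soft-callout frequency. */
	if (sysctlbyname("debug.psm.hz", &hz, &len, NULL, 0) == 0)
		printf("debug.psm.hz = %d\n", hz);

	/* Raise it; the OID is CTLFLAG_RW, so this needs privileges. */
	if (sysctlbyname("debug.psm.hz", NULL, NULL, &newhz,
	    sizeof(newhz)) != 0)
		perror("sysctlbyname");
	return (0);
}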
static void
psmintr(void *arg)
{
	struct psm_softc *sc = arg;
	struct timeval now;
	int c;
	packetbuf_t *pb;

+	if (aux_mux_is_enabled(sc->kbdc))
+		VLOG(2, (LOG_DEBUG, "psmintr: active multiplexing mode is "
+		    "not supported!\n"));

	/* read until there is nothing to read */
	while ((c = read_aux_data_no_wait(sc->kbdc)) != -1) {
		pb = &sc->pqueue[sc->pqueue_end];

		/* discard the byte if the device is not open */
		if (!(sc->state & (PSM_OPEN | PSM_EV_OPEN_R | PSM_EV_OPEN_A)))
			continue;

		getmicrouptime(&now);
		if ((pb->inputbytes > 0) &&
		    timevalcmp(&now, &sc->inputtimeout, >)) {
			VLOG(3, (LOG_DEBUG, "psmintr: delay too long; "
			    "resetting byte count\n"));
			pb->inputbytes = 0;
			sc->syncerrors = 0;
			sc->pkterrors = 0;
		}
		sc->inputtimeout.tv_sec = PSM_INPUT_TIMEOUT / 1000000;
		sc->inputtimeout.tv_usec = PSM_INPUT_TIMEOUT % 1000000;
		timevaladd(&sc->inputtimeout, &now);

		pb->ipacket[pb->inputbytes++] = c;

		if (sc->mode.level == PSM_LEVEL_NATIVE) {
			VLOG(4, (LOG_DEBUG, "psmintr: %02x\n",
			    pb->ipacket[0]));
			sc->syncerrors = 0;
			sc->pkterrors = 0;
			goto next;
		} else {
			if (pb->inputbytes < sc->mode.packetsize)
				continue;
			VLOG(4, (LOG_DEBUG,
			    "psmintr: %02x %02x %02x %02x %02x %02x\n",
			    pb->ipacket[0], pb->ipacket[1], pb->ipacket[2],
			    pb->ipacket[3], pb->ipacket[4], pb->ipacket[5]));
		}

		c = pb->ipacket[0];

		if ((sc->flags & PSM_NEED_SYNCBITS) != 0) {
			sc->mode.syncmask[1] = (c & sc->mode.syncmask[0]);
			sc->flags &= ~PSM_NEED_SYNCBITS;
			VLOG(2, (LOG_DEBUG,
			    "psmintr: Sync bytes now %04x,%04x\n",
			    sc->mode.syncmask[0], sc->mode.syncmask[1]));
		} else if ((sc->config & PSM_CONFIG_NOCHECKSYNC) == 0 &&
		    (c & sc->mode.syncmask[0]) != sc->mode.syncmask[1]) {
			VLOG(3, (LOG_DEBUG, "psmintr: out of sync "
			    "(%04x != %04x) %d cmds since last error.\n",
			    c & sc->mode.syncmask[0], sc->mode.syncmask[1],
			    sc->cmdcount - sc->lasterr));
			sc->lasterr = sc->cmdcount;
			/*
			 * The sync byte test is a weak measure of packet
			 * validity.  Conservatively discard any input yet
			 * to be seen by userland when we detect a sync
			 * error since there is a good chance some of
			 * the queued packets have undetected errors.
			 */
			dropqueue(sc);
			if (sc->syncerrors == 0)
				sc->pkterrors++;
			++sc->syncerrors;
			sc->lastinputerr = now;
			if (sc->syncerrors >= sc->mode.packetsize * 2 ||
			    sc->pkterrors >= pkterrthresh) {
				/*
				 * If we've failed to find a single sync byte
				 * in 2 packets worth of data, or we've seen
				 * persistent packet errors during the
				 * validation period, reinitialize the mouse
				 * in hopes of returning it to the expected
				 * mode.
				 */
				VLOG(3, (LOG_DEBUG,
				    "psmintr: reset the mouse.\n"));
				reinitialize(sc, TRUE);
			} else if (sc->syncerrors == sc->mode.packetsize) {
				/*
				 * Try a soft reset after searching for a sync
				 * byte through a packet length of bytes.
				 */
				VLOG(3, (LOG_DEBUG,
				    "psmintr: re-enable the mouse.\n"));
				pb->inputbytes = 0;
				disable_aux_dev(sc->kbdc);
				enable_aux_dev(sc->kbdc);
			} else {
				VLOG(3, (LOG_DEBUG,
				    "psmintr: discard a byte (%d)\n",
				    sc->syncerrors));
				pb->inputbytes--;
				bcopy(&pb->ipacket[1], &pb->ipacket[0],
				    pb->inputbytes);
			}
			continue;
		}

		/*
		 * We have what appears to be a valid packet.
		 * Reset the error counters.
		 */
		sc->syncerrors = 0;

		/*
		 * Drop even good packets if they occur within a timeout
		 * period of a sync error.  This allows the detection of
		 * a change in the mouse's packet mode without exposing
		 * erratic mouse behavior to the user.  Some KVMs forget
		 * enhanced mouse modes during switch events.
		 */
		if (!timeelapsed(&sc->lastinputerr, psmerrsecs, psmerrusecs,
		    &now)) {
			pb->inputbytes = 0;
			continue;
		}

		/*
		 * Now that we're out of the validation period, reset
		 * the packet error count.
		 */
		sc->pkterrors = 0;

		sc->cmdcount++;
next:
		if (++sc->pqueue_end >= PSM_PACKETQUEUE)
			sc->pqueue_end = 0;
		/*
		 * If we've filled the queue then call the softintr ourselves,
		 * otherwise schedule the interrupt for later.
		 */
		if (!timeelapsed(&sc->lastsoftintr, psmsecs, psmusecs, &now) ||
		    (sc->pqueue_end == sc->pqueue_start)) {
			if ((sc->state & PSM_SOFTARMED) != 0) {
				sc->state &= ~PSM_SOFTARMED;
				callout_stop(&sc->softcallout);
			}
			psmsoftintr(arg);
		} else if ((sc->state & PSM_SOFTARMED) == 0) {
			sc->state |= PSM_SOFTARMED;
			callout_reset(&sc->softcallout,
			    psmhz < 1 ? 1 : (hz/psmhz), psmsoftintr, arg);
		}
	}
}
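/*
 * Illustrative sketch, not part of this change: the out-of-sync test in
 * psmintr() reduces to a single mask-and-compare per packet.  Assuming a
 * generic PS/2 mouse, whose first byte always has bit 3 set, syncmask[0]
 * and syncmask[1] are both 0x08, and a stray data byte that slides into
 * the first slot is caught whenever its bit 3 happens to be clear.
 */
static int
first_byte_in_sync(unsigned char c, const unsigned char syncmask[2])
{
	/* Mirrors (c & sc->mode.syncmask[0]) != sc->mode.syncmask[1]. */
	return ((c & syncmask[0]) == syncmask[1]);
}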
static void
proc_mmanplus(struct psm_softc *sc, packetbuf_t *pb, mousestatus_t *ms,
    int *x, int *y, int *z)
{
	/*
	 * PS2++ protocol packet
	 *
	 *          b7 b6 b5 b4 b3 b2 b1 b0
	 * byte 1:  *  1  p3 p2 1  *  *  *
	 * byte 2:  c1 c2 p1 p0 d1 d0 1  0
	 *
	 * p3-p0: packet type
	 * c1, c2: c1 & c2 == 1, if p2 == 0
	 *         c1 & c2 == 0, if p2 == 1
	 *
	 * packet type: 0 (device type)
	 * See comments in enable_mmanplus() below.
	 *
	 * packet type: 1 (wheel data)
	 *
	 *          b7 b6 b5 b4 b3 b2 b1 b0
	 * byte 3:  h  *  B5 B4 s  d2 d1 d0
	 *
	 * h: 1, if horizontal roller data
	 *    0, if vertical roller data
	 * B4, B5: button 4 and 5
	 * s: sign bit
	 * d2-d0: roller data
	 *
	 * packet type: 2 (reserved)
	 */
	if (((pb->ipacket[0] & MOUSE_PS2PLUS_SYNCMASK) == MOUSE_PS2PLUS_SYNC) &&
	    (abs(*x) > 191) && MOUSE_PS2PLUS_CHECKBITS(pb->ipacket)) {
		/*
		 * the extended data packet encodes button
		 * and wheel events
		 */
		switch (MOUSE_PS2PLUS_PACKET_TYPE(pb->ipacket)) {
		case 1:
			/* wheel data packet */
			*x = *y = 0;
			if (pb->ipacket[2] & 0x80) {
				/* XXX horizontal roller count - ignore it */
				;
			} else {
				/* vertical roller count */
				*z = (pb->ipacket[2] & MOUSE_PS2PLUS_ZNEG) ?
				    (pb->ipacket[2] & 0x0f) - 16 :
				    (pb->ipacket[2] & 0x0f);
			}
			ms->button |=
			    (pb->ipacket[2] & MOUSE_PS2PLUS_BUTTON4DOWN) ?
			    MOUSE_BUTTON4DOWN : 0;
			ms->button |=
			    (pb->ipacket[2] & MOUSE_PS2PLUS_BUTTON5DOWN) ?
			    MOUSE_BUTTON5DOWN : 0;
			break;
		case 2:
			/*
			 * this packet type is reserved by
			 * Logitech...
			 */
			/*
			 * IBM ScrollPoint Mouse uses this
			 * packet type to encode both vertical
			 * and horizontal scroll movement.
			 */
			*x = *y = 0;
			/* horizontal count */
			if (pb->ipacket[2] & 0x0f)
				*z = (pb->ipacket[2] & MOUSE_SPOINT_WNEG) ?
				    -2 : 2;
			/* vertical count */
			if (pb->ipacket[2] & 0xf0)
				*z = (pb->ipacket[2] & MOUSE_SPOINT_ZNEG) ?
				    -1 : 1;
			break;
		case 0:
			/* device type packet - shouldn't happen */
			/* FALLTHROUGH */
		default:
			*x = *y = 0;
			ms->button = ms->obutton;
			VLOG(1, (LOG_DEBUG, "psmintr: unknown PS2++ packet "
			    "type %d: 0x%02x 0x%02x 0x%02x\n",
			    MOUSE_PS2PLUS_PACKET_TYPE(pb->ipacket),
			    pb->ipacket[0], pb->ipacket[1], pb->ipacket[2]));
			break;
		}
	} else {
		/* preserve button states */
		ms->button |= ms->obutton & MOUSE_EXTBUTTONS;
	}
}
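/*
 * Illustrative sketch, not part of this change: the roller nibble of a
 * PS2++ type-1 packet is a 4-bit two's-complement count, assuming the
 * MOUSE_PS2PLUS_ZNEG sign bit is 0x08 as suggested by the decode above.
 * A third byte of 0x0e therefore decodes to -2 and 0x02 decodes to +2.
 */
static int
ps2plus_wheel_count(unsigned char byte3)
{
	int z = byte3 & 0x0f;

	/* Sign-extend the 4-bit count, as proc_mmanplus() does. */
	return ((byte3 & 0x08) ? z - 16 : z);
}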
static int
proc_synaptics(struct psm_softc *sc, packetbuf_t *pb, mousestatus_t *ms,
    int *x, int *y, int *z)
{
	static int touchpad_buttons;
	static int guest_buttons;
	static finger_t f[PSM_FINGERS];
	int w, id, nfingers, ewcode, extended_buttons, clickpad_pressed;

	extended_buttons = 0;

	/* TouchPad PS/2 absolute mode message format with capFourButtons:
	 *
	 *  Bits:        7   6   5   4   3   2   1   0 (LSB)
	 *  ------------------------------------------------
	 *  ipacket[0]:  1   0  W3  W2   0  W1   R   L
	 *  ipacket[1]: Yb  Ya  Y9  Y8  Xb  Xa  X9  X8
	 *  ipacket[2]: Z7  Z6  Z5  Z4  Z3  Z2  Z1  Z0
	 *  ipacket[3]:  1   1  Yc  Xc   0  W0 D^R U^L
	 *  ipacket[4]: X7  X6  X5  X4  X3  X2  X1  X0
	 *  ipacket[5]: Y7  Y6  Y5  Y4  Y3  Y2  Y1  Y0
	 *
	 * Legend:
	 *  L: left physical mouse button
	 *  R: right physical mouse button
	 *  D: down button
	 *  U: up button
	 *  W: "wrist" value
	 *  X: x position
	 *  Y: y position
	 *  Z: pressure
	 *
	 * Without capFourButtons but with nExtendedButtons and/or capMiddle
	 *
	 *  Bits:        7   6   5   4      3      2      1      0 (LSB)
	 *  ------------------------------------------------------
	 *  ipacket[3]:  1   1  Yc  Xc      0     W0    E^R    M^L
	 *  ipacket[4]: X7  X6  X5  X4  X3|b7  X2|b5  X1|b3  X0|b1
	 *  ipacket[5]: Y7  Y6  Y5  Y4  Y3|b8  Y2|b6  Y1|b4  Y0|b2
	 *
	 * Legend:
	 *  M: Middle physical mouse button
	 *  E: Extended mouse buttons reported instead of low bits of X and Y
	 *  b1-b8: Extended mouse buttons
	 *    Only ((nExtendedButtons + 1) >> 1) bits are used in packets
	 *    4 and 5; for reading the X and Y values they should be zeroed.
	 *
	 * Absolute reportable limits:    0 - 6143.
	 * Typical bezel limits:       1472 - 5472.
	 * Typical edge margins:       1632 - 5312.
	 *
	 * w = 3 Passthrough Packet
	 *
	 * Byte 2,5,6 == Byte 1,2,3 of "Guest"
	 */

	if (!synaptics_support)
		return (0);

	/* Sanity check for out of sync packets. */
	if ((pb->ipacket[0] & 0xc8) != 0x80 ||
	    (pb->ipacket[3] & 0xc8) != 0xc0)
		return (-1);

	*x = *y = 0;
	ms->button = ms->obutton;

	/*
	 * Pressure value.
	 * Interpretation:
	 *   z = 0      No finger contact
	 *   z = 10     Finger hovering near the pad
	 *   z = 30     Very light finger contact
	 *   z = 80     Normal finger contact
	 *   z = 110    Very heavy finger contact
	 *   z = 200    Finger lying flat on pad surface
	 *   z = 255    Maximum reportable Z
	 */
	*z = pb->ipacket[2];

	/*
	 * Finger width value
	 * Interpretation:
	 *   w = 0      Two fingers on the pad (capMultiFinger needed)
	 *   w = 1      Three or more fingers (capMultiFinger needed)
	 *   w = 2      Pen (instead of finger) (capPen needed)
	 *   w = 3      Reserved (passthrough?)
	 *   w = 4-7    Finger of normal width (capPalmDetect needed)
	 *   w = 8-14   Very wide finger or palm (capPalmDetect needed)
	 *   w = 15     Maximum reportable width (capPalmDetect needed)
	 */
	/* XXX Is checking capExtended enough? */
	if (sc->synhw.capExtended)
		w = ((pb->ipacket[0] & 0x30) >> 2) |
		    ((pb->ipacket[0] & 0x04) >> 1) |
		    ((pb->ipacket[3] & 0x04) >> 2);
	else {
		/* Assume a finger of regular width. */
		w = 4;
	}

	switch (w) {
	case 3:
		/*
		 * Handle packets from the guest device. See:
		 * Synaptics PS/2 TouchPad Interfacing Guide, Section 5.1
		 */
-		if (sc->synhw.capPassthrough) {
+		if (sc->synhw.capPassthrough || sc->muxport != PSM_NOMUX) {
			*x = ((pb->ipacket[1] & 0x10) ?
			    pb->ipacket[4] - 256 : pb->ipacket[4]);
			*y = ((pb->ipacket[1] & 0x20) ?
			    pb->ipacket[5] - 256 : pb->ipacket[5]);
			*z = 0;

			guest_buttons = 0;
			if (pb->ipacket[1] & 0x01)
				guest_buttons |= MOUSE_BUTTON1DOWN;
			if (pb->ipacket[1] & 0x04)
				guest_buttons |= MOUSE_BUTTON2DOWN;
			if (pb->ipacket[1] & 0x02)
				guest_buttons |= MOUSE_BUTTON3DOWN;
#ifdef EVDEV_SUPPORT
			if (evdev_rcpt_mask & EVDEV_RCPT_HW_MOUSE) {
				evdev_push_rel(sc->evdev_r, REL_X, *x);
				evdev_push_rel(sc->evdev_r, REL_Y, -*y);
				evdev_push_mouse_btn(sc->evdev_r,
				    guest_buttons);
				evdev_sync(sc->evdev_r);
			}
#endif
			ms->button = touchpad_buttons | guest_buttons |
			    sc->extended_buttons;
		}
		goto SYNAPTICS_END;

	case 2:
		/* Handle Extended W mode packets */
		ewcode = (pb->ipacket[5] & 0xf0) >> 4;
#if PSM_FINGERS > 1
		switch (ewcode) {
		case 1:
			/* Secondary finger */
			if (sc->synhw.capAdvancedGestures)
				f[1] = (finger_t) {
					.x = (((pb->ipacket[4] & 0x0f) << 8) |
					    pb->ipacket[1]) << 1,
					.y = (((pb->ipacket[4] & 0xf0) << 4) |
					    pb->ipacket[2]) << 1,
					.p = ((pb->ipacket[3] & 0x30) |
					    (pb->ipacket[5] & 0x0f)) << 1,
					.w = PSM_FINGER_DEFAULT_W,
					.flags = PSM_FINGER_FUZZY,
				};
			else if (sc->synhw.capReportsV)
				f[1] = (finger_t) {
					.x = (((pb->ipacket[4] & 0x0f) << 8) |
					    (pb->ipacket[1] & 0xfe)) << 1,
					.y = (((pb->ipacket[4] & 0xf0) << 4) |
					    (pb->ipacket[2] & 0xfe)) << 1,
					.p = ((pb->ipacket[3] & 0x30) |
					    (pb->ipacket[5] & 0x0e)) << 1,
					.w = (((pb->ipacket[5] & 0x01) << 2) |
					    ((pb->ipacket[2] & 0x01) << 1) |
					    (pb->ipacket[1] & 0x01)) + 8,
					.flags = PSM_FINGER_FUZZY,
				};
		default:
			break;
		}
#endif
		goto SYNAPTICS_END;

	case 1:
	case 0:
		nfingers = w + 2;
		break;

	default:
		nfingers = 1;
	}

	if (sc->syninfo.touchpad_off)
		goto SYNAPTICS_END;

	/* Button presses */
	touchpad_buttons = 0;
	if (pb->ipacket[0] & 0x01)
		touchpad_buttons |= MOUSE_BUTTON1DOWN;
	if (pb->ipacket[0] & 0x02)
		touchpad_buttons |= MOUSE_BUTTON3DOWN;

	if (sc->synhw.capExtended && sc->synhw.capFourButtons) {
		if ((pb->ipacket[3] ^ pb->ipacket[0]) & 0x01)
			touchpad_buttons |= MOUSE_BUTTON4DOWN;
		if ((pb->ipacket[3] ^ pb->ipacket[0]) & 0x02)
			touchpad_buttons |= MOUSE_BUTTON5DOWN;
	} else if (sc->synhw.capExtended && sc->synhw.capMiddle &&
	    !sc->synhw.capClickPad) {
		/* Middle Button */
		if ((pb->ipacket[0] ^ pb->ipacket[3]) & 0x01)
			touchpad_buttons |= MOUSE_BUTTON2DOWN;
	} else if (sc->synhw.capExtended && (sc->synhw.nExtendedButtons > 0)) {
		/* Extended Buttons */
		if ((pb->ipacket[0] ^ pb->ipacket[3]) & 0x02) {
			if (sc->syninfo.directional_scrolls) {
				if (pb->ipacket[4] & 0x01)
					extended_buttons |= MOUSE_BUTTON4DOWN;
				if (pb->ipacket[5] & 0x01)
					extended_buttons |= MOUSE_BUTTON5DOWN;
				if (pb->ipacket[4] & 0x02)
					extended_buttons |= MOUSE_BUTTON6DOWN;
				if (pb->ipacket[5] & 0x02)
					extended_buttons |= MOUSE_BUTTON7DOWN;
			} else {
				if (pb->ipacket[4] & 0x01)
					extended_buttons |= MOUSE_BUTTON1DOWN;
				if (pb->ipacket[5] & 0x01)
					extended_buttons |= MOUSE_BUTTON3DOWN;
				if (pb->ipacket[4] & 0x02)
					extended_buttons |= MOUSE_BUTTON2DOWN;
				sc->extended_buttons = extended_buttons;
			}

			/*
			 * Zero out bits used by extended buttons to avoid
			 * misinterpretation of the data absolute position.
			 *
			 * The bits represented by
			 *
			 *     (nExtendedButtons + 1) >> 1
			 *
			 * will be masked out in both bytes.
			 * The mask for n bits is computed with the formula
			 *
			 *     (1 << n) - 1
			 */
			int maskedbits = 0;
			int mask = 0;
			maskedbits = (sc->synhw.nExtendedButtons + 1) >> 1;
			mask = (1 << maskedbits) - 1;
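			/*
			 * Worked example (illustrative): a pad reporting
			 * nExtendedButtons == 4 yields
			 * maskedbits = (4 + 1) >> 1 = 2 and
			 * mask = (1 << 2) - 1 = 0x03, so the two low bits
			 * of both ipacket[4] and ipacket[5] are cleared
			 * below before they are reused as position bits.
			 */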
#ifdef EVDEV_SUPPORT
			int i;

			if (evdev_rcpt_mask & EVDEV_RCPT_HW_MOUSE) {
				if (sc->synhw.capPassthrough) {
					evdev_push_mouse_btn(sc->evdev_r,
					    extended_buttons);
					evdev_sync(sc->evdev_r);
				}
				for (i = 0; i < maskedbits; i++) {
					evdev_push_key(sc->evdev_a,
					    BTN_0 + i * 2,
					    pb->ipacket[4] & (1 << i));
					evdev_push_key(sc->evdev_a,
					    BTN_0 + i * 2 + 1,
					    pb->ipacket[5] & (1 << i));
				}
			}
#endif
			pb->ipacket[4] &= ~(mask);
			pb->ipacket[5] &= ~(mask);
		} else if (!sc->syninfo.directional_scrolls &&
		    !sc->gesture.in_vscroll) {
			/*
			 * Keep reporting MOUSE DOWN until we get a new packet
			 * indicating otherwise.
			 */
			extended_buttons |= sc->extended_buttons;
		}
	}

	if (sc->synhw.capReportsV && nfingers > 1)
		f[0] = (finger_t) {
			.x = ((pb->ipacket[3] & 0x10) << 8) |
			    ((pb->ipacket[1] & 0x0f) << 8) |
			    (pb->ipacket[4] & 0xfd),
			.y = ((pb->ipacket[3] & 0x20) << 7) |
			    ((pb->ipacket[1] & 0xf0) << 4) |
			    (pb->ipacket[5] & 0xfd),
			.p = *z & 0xfe,
			.w = (((pb->ipacket[2] & 0x01) << 2) |
			    (pb->ipacket[5] & 0x02) |
			    ((pb->ipacket[4] & 0x02) >> 1)) + 8,
			.flags = PSM_FINGER_FUZZY,
		};
	else
		f[0] = (finger_t) {
			.x = ((pb->ipacket[3] & 0x10) << 8) |
			    ((pb->ipacket[1] & 0x0f) << 8) |
			    pb->ipacket[4],
			.y = ((pb->ipacket[3] & 0x20) << 7) |
			    ((pb->ipacket[1] & 0xf0) << 4) |
			    pb->ipacket[5],
			.p = *z,
			.w = w,
			.flags = nfingers > 1 ? PSM_FINGER_FUZZY : 0,
		};

	/* Ignore hovering and unmeasurable touches */
	if (f[0].p < sc->syninfo.min_pressure || f[0].x < 2)
		nfingers = 0;

	/* Handle ClickPad */
	if (sc->synhw.capClickPad) {
		clickpad_pressed = (pb->ipacket[0] ^ pb->ipacket[3]) & 0x01;
		if (sc->synhw.forcePad) {
			/*
			 * Forcepads erroneously report a button click if
			 * there are 2 or more fingers on the touchpad,
			 * breaking multifinger gestures.  To work around
			 * this, start reporting a click only after 4
			 * consecutive single-touch packets have been
			 * received.  Skip these packets in case more
			 * contacts appear.
			 */
			switch (nfingers) {
			case 0:
				sc->fpcount = 0;
				break;
			case 1:
				if (clickpad_pressed &&
				    sc->fpcount < INT_MAX)
					++sc->fpcount;
				/* FALLTHROUGH */
			default:
				if (!clickpad_pressed)
					sc->fpcount = 0;
				if (sc->fpcount >= sc->syninfo.window_min)
					touchpad_buttons |= MOUSE_BUTTON1DOWN;
			}
		} else if (clickpad_pressed)
			touchpad_buttons |= MOUSE_BUTTON1DOWN;
	}

	for (id = 0; id < PSM_FINGERS; id++)
		if (id >= nfingers)
			PSM_FINGER_RESET(f[id]);

#ifdef EVDEV_SUPPORT
	if (evdev_rcpt_mask & EVDEV_RCPT_HW_MOUSE) {
		for (id = 0; id < PSM_FINGERS; id++) {
			if (PSM_FINGER_IS_SET(f[id]))
				psm_push_mt_finger(sc, id, &f[id]);
			else
				psm_release_mt_slot(sc->evdev_a, id);
		}
		evdev_push_key(sc->evdev_a, BTN_TOUCH, nfingers > 0);
		evdev_push_nfingers(sc->evdev_a, nfingers);
		if (nfingers > 0)
			psm_push_st_finger(sc, &f[0]);
		else
			evdev_push_abs(sc->evdev_a, ABS_PRESSURE, 0);
		evdev_push_mouse_btn(sc->evdev_a, touchpad_buttons);
		if (sc->synhw.capExtended && sc->synhw.capFourButtons) {
			evdev_push_key(sc->evdev_a, BTN_FORWARD,
			    touchpad_buttons & MOUSE_BUTTON4DOWN);
			evdev_push_key(sc->evdev_a, BTN_BACK,
			    touchpad_buttons & MOUSE_BUTTON5DOWN);
		}
		evdev_sync(sc->evdev_a);
	}
#endif

	ms->button = touchpad_buttons;

	psmgestures(sc, &f[0], nfingers, ms);
	for (id = 0; id < PSM_FINGERS; id++)
		psmsmoother(sc, &f[id], id, ms, x, y);

	/* Palm detection doesn't terminate the current action. */
	if (psmpalmdetect(sc, &f[0], nfingers)) {
		*x = *y = *z = 0;
		ms->button = ms->obutton;
		return (0);
	}

	ms->button |= extended_buttons | guest_buttons;

SYNAPTICS_END:
	/*
	 * Use the extra buttons as a scrollwheel
	 *
	 * XXX X.Org uses the Z axis for vertical wheel only,
	 * whereas moused(8) understands special values to differ
	 * vertical and horizontal wheels.
	 *
	 * xf86-input-mouse therefore needs a small patch to
	 * understand these special values.  Without it, the
	 * horizontal wheel acts as a vertical wheel in X.Org.
	 *
	 * That's why the horizontal wheel is disabled by
	 * default for now.
	 */
	if (ms->button & MOUSE_BUTTON4DOWN)
		*z = -1;
	else if (ms->button & MOUSE_BUTTON5DOWN)
		*z = 1;
	else if (ms->button & MOUSE_BUTTON6DOWN)
		*z = -2;
	else if (ms->button & MOUSE_BUTTON7DOWN)
		*z = 2;
	else
		*z = 0;
	ms->button &= ~(MOUSE_BUTTON4DOWN | MOUSE_BUTTON5DOWN |
	    MOUSE_BUTTON6DOWN | MOUSE_BUTTON7DOWN);

	return (0);
}
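/*
 * Illustrative sketch, not part of this change: the SYNAPTICS_END
 * epilogue above folds the virtual wheel buttons back into the Z axis
 * that level-1 consumers such as moused(8) expect: buttons 4/5 map to
 * z = -1/+1 (vertical wheel) and buttons 6/7 to z = -2/+2 (horizontal
 * wheel), after which the button bits are cleared.
 */
static int
wheel_from_buttons(int buttons)
{
	if (buttons & MOUSE_BUTTON4DOWN)
		return (-1);
	if (buttons & MOUSE_BUTTON5DOWN)
		return (1);
	if (buttons & MOUSE_BUTTON6DOWN)
		return (-2);
	if (buttons & MOUSE_BUTTON7DOWN)
		return (2);
	return (0);
}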
+static int
+proc_synaptics_mux(struct psm_softc *sc, packetbuf_t *pb)
+{
+	int butt;
+
+	/*
+	 * Convert a 3-byte interleaved mixture of Synaptics and generic
+	 * mouse packets into the plain 6-byte Synaptics packet protocol.
+	 * While in hidden multiplexing mode the KBC does some editing of
+	 * the packet stream.  It remembers the button bits from the last
+	 * packet received from each device, and replaces the button bits
+	 * of every packet with the logical OR of all devices' most recent
+	 * button bits.  This button crosstalk should be filtered out, as
+	 * Synaptics and generic mice encode middle button presses in
+	 * different ways.
+	 */
+	switch (pb->ipacket[0] & 0xc0) {
+	case 0x80:	/* First 3 bytes of Synaptics packet */
+		bcopy(pb->ipacket, sc->muxsave, 3);
+		/* Compute middle mouse button suppression timeout. */
+		sc->muxmidtimeout.tv_sec = 0;
+		sc->muxmidtimeout.tv_usec = 50000;	/* ~2-3 ints */
+		timevaladd(&sc->muxmidtimeout, &sc->lastsoftintr);
+		return (1);
+
+	case 0xc0:	/* Second 3 bytes of Synaptics packet */
+		/* Join two 3-byte absolute packets */
+		bcopy(pb->ipacket, pb->ipacket + 3, 3);
+		bcopy(sc->muxsave, pb->ipacket, 3);
+		/* Prefer trackpoint buttons over touchpad's */
+		pb->ipacket[0] &= ~(0x08 | sc->muxmsbuttons);
+		pb->ipacket[3] &= ~(0x08 | sc->muxmsbuttons);
+		butt = (pb->ipacket[3] & 0x03) << 2 | (pb->ipacket[0] & 0x03);
+		/* Add hysteresis to remove spurious middle button events */
+		if (butt != sc->muxtpbuttons && sc->fpcount < 1) {
+			pb->ipacket[0] &= 0xfc;
+			pb->ipacket[0] |= sc->muxtpbuttons & 0x03;
+			pb->ipacket[3] &= 0xfc;
+			pb->ipacket[3] |= sc->muxtpbuttons >> 2 & 0x03;
+			++sc->fpcount;
+		} else {
+			sc->fpcount = 0;
+			sc->muxtpbuttons = butt;
+		}
+		/* Filter out impossible w induced by middle trackpoint btn */
+		if (sc->synhw.capExtended && !sc->synhw.capPassthrough &&
+		    (pb->ipacket[0] & 0x34) == 0x04 &&
+		    (pb->ipacket[3] & 0x04) == 0x04) {
+			pb->ipacket[0] &= 0xfb;
+			pb->ipacket[3] &= 0xfb;
+		}
+		sc->muxsave[0] &= 0x30;
+		break;
+
+	default:	/* Generic mouse (Trackpoint) packet */
+		/* Filter out middle button events induced by some w values */
+		if (sc->muxmsbuttons & 0x03 || pb->ipacket[0] & 0x03 ||
+		    (timevalcmp(&sc->lastsoftintr, &sc->muxmidtimeout, <=) &&
+		    (sc->muxsave[0] & 0x30 || sc->muxsave[2] > 8)))
+			pb->ipacket[0] &= 0xfb;
+		sc->muxmsbuttons = pb->ipacket[0] & 0x07;
+		/* Convert to Synaptics pass-through protocol */
+		pb->ipacket[4] = pb->ipacket[1];
+		pb->ipacket[5] = pb->ipacket[2];
+		pb->ipacket[1] = pb->ipacket[0];
+		pb->ipacket[2] = 0;
+		pb->ipacket[0] = 0x84 | (sc->muxtpbuttons & 0x03);
+		pb->ipacket[3] = 0xc4 | (sc->muxtpbuttons >> 2 & 0x03);
+	}
+
+	VLOG(4, (LOG_DEBUG, "synaptics: %02x %02x %02x %02x %02x %02x\n",
+	    pb->ipacket[0], pb->ipacket[1], pb->ipacket[2],
+	    pb->ipacket[3], pb->ipacket[4], pb->ipacket[5]));
+
+	pb->inputbytes = MOUSE_SYNAPTICS_PACKETSIZE;
+	return (0);
+}
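/*
 * Illustrative sketch, not part of this change: in hidden multiplexing
 * mode the top two bits of the first byte are enough to route a 3-byte
 * half-packet, which is exactly what the switch in proc_synaptics_mux()
 * keys on: 10xxxxxx opens a Synaptics absolute packet, 11xxxxxx closes
 * one, and anything else is a generic (trackpoint) mouse packet.
 */
enum mux_half { MUX_SYN_HEAD, MUX_SYN_TAIL, MUX_GENERIC };

static enum mux_half
classify_mux_half(unsigned char b0)
{
	switch (b0 & 0xc0) {
	case 0x80:
		return (MUX_SYN_HEAD);	/* first 3 bytes */
	case 0xc0:
		return (MUX_SYN_TAIL);	/* second 3 bytes */
	default:
		return (MUX_GENERIC);	/* trackpoint packet */
	}
}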
+
+static int
psmpalmdetect(struct psm_softc *sc, finger_t *f, int nfingers)
{
	if (!(
	    ((sc->synhw.capMultiFinger || sc->synhw.capAdvancedGestures) &&
	      !sc->synhw.capReportsV && nfingers > 1) ||
	    (sc->synhw.capReportsV && nfingers > 2) ||
	    (sc->synhw.capPalmDetect && f->w <= sc->syninfo.max_width) ||
	    (!sc->synhw.capPalmDetect && f->p <= sc->syninfo.max_pressure) ||
	    (sc->synhw.capPen && f->flags & PSM_FINGER_IS_PEN))) {
		/*
		 * We consider the packet irrelevant for the current
		 * action when:
		 *  - the width isn't comprised in:
		 *    [1; max_width]
		 *  - the pressure isn't comprised in:
		 *    [min_pressure; max_pressure]
		 *  - pens aren't supported but PSM_FINGER_IS_PEN is set
		 */
		VLOG(2, (LOG_DEBUG, "synaptics: palm detected! (%d)\n",
		    f->w));
		return (1);
	}
	return (0);
}

static void
psmgestures(struct psm_softc *sc, finger_t *fingers, int nfingers,
    mousestatus_t *ms)
{
	smoother_t *smoother;
	gesture_t *gest;
	finger_t *f;
	int y_ok, center_button, center_x, right_button, right_x, i;

	f = &fingers[0];
	smoother = &sc->smoother[0];
	gest = &sc->gesture;

	/* Find first active finger. */
	if (nfingers > 0) {
		for (i = 0; i < PSM_FINGERS; i++) {
			if (PSM_FINGER_IS_SET(fingers[i])) {
				f = &fingers[i];
				smoother = &sc->smoother[i];
				break;
			}
		}
	}

	/*
	 * Check pressure to detect a real wanted action on the
	 * touchpad.
	 */
	if (f->p >= sc->syninfo.min_pressure) {
		int x0, y0;
		int dxp, dyp;
		int start_x, start_y;
		int queue_len;
		int margin_top, margin_right, margin_bottom, margin_left;
		int window_min, window_max;
		int vscroll_hor_area, vscroll_ver_area;
		int two_finger_scroll;
		int max_x, max_y;

		/* Read sysctl. */
		/* XXX Verify values? */
		margin_top = sc->syninfo.margin_top;
		margin_right = sc->syninfo.margin_right;
		margin_bottom = sc->syninfo.margin_bottom;
		margin_left = sc->syninfo.margin_left;
		window_min = sc->syninfo.window_min;
		window_max = sc->syninfo.window_max;
		vscroll_hor_area = sc->syninfo.vscroll_hor_area;
		vscroll_ver_area = sc->syninfo.vscroll_ver_area;
		two_finger_scroll = sc->syninfo.two_finger_scroll;
		max_x = sc->syninfo.max_x;
		max_y = sc->syninfo.max_y;

		/* Read current absolute position. */
		x0 = f->x;
		y0 = f->y;

		/*
		 * Limit the coordinates to the specified margins because
		 * this area isn't very reliable.
		 */
		if (x0 <= margin_left)
			x0 = margin_left;
		else if (x0 >= max_x - margin_right)
			x0 = max_x - margin_right;
		if (y0 <= margin_bottom)
			y0 = margin_bottom;
		else if (y0 >= max_y - margin_top)
			y0 = max_y - margin_top;

		VLOG(3, (LOG_DEBUG, "synaptics: ipacket: [%d, %d], %d, %d\n",
		    x0, y0, f->p, f->w));

		/*
		 * If the action is just beginning, init the structure and
		 * compute tap timeout.
		 */
		if (!(sc->flags & PSM_FLAGS_FINGERDOWN)) {
			VLOG(3, (LOG_DEBUG, "synaptics: ----\n"));

			/* Initialize queue. */
			gest->window_min = window_min;

			/* Reset pressure peak. */
			gest->zmax = 0;

			/* Reset fingers count. */
			gest->fingers_nb = 0;

			/* Reset virtual scrolling state. */
			gest->in_vscroll = 0;

			/* Compute tap timeout.
*/ gest->taptimeout.tv_sec = tap_timeout / 1000000; gest->taptimeout.tv_usec = tap_timeout % 1000000; timevaladd(&gest->taptimeout, &sc->lastsoftintr); sc->flags |= PSM_FLAGS_FINGERDOWN; /* Smoother has not been reset yet */ queue_len = 1; start_x = x0; start_y = y0; } else { queue_len = smoother->queue_len + 1; start_x = smoother->start_x; start_y = smoother->start_y; } /* Process ClickPad softbuttons */ if (sc->synhw.capClickPad && ms->button & MOUSE_BUTTON1DOWN) { y_ok = sc->syninfo.softbuttons_y >= 0 ? start_y < sc->syninfo.softbuttons_y : start_y > max_y + sc->syninfo.softbuttons_y; center_button = MOUSE_BUTTON2DOWN; center_x = sc->syninfo.softbutton2_x; right_button = MOUSE_BUTTON3DOWN; right_x = sc->syninfo.softbutton3_x; if (center_x > 0 && right_x > 0 && center_x > right_x) { center_button = MOUSE_BUTTON3DOWN; center_x = sc->syninfo.softbutton3_x; right_button = MOUSE_BUTTON2DOWN; right_x = sc->syninfo.softbutton2_x; } if (right_x > 0 && start_x > right_x && y_ok) ms->button = (ms->button & ~MOUSE_BUTTON1DOWN) | right_button; else if (center_x > 0 && start_x > center_x && y_ok) ms->button = (ms->button & ~MOUSE_BUTTON1DOWN) | center_button; } /* If in tap-hold, add the recorded button. */ if (gest->in_taphold) ms->button |= gest->tap_button; /* * For tap, we keep the maximum number of fingers and the * pressure peak. Also with multiple fingers, we increase * the minimum window. */ if (nfingers > 1) gest->window_min = window_max; gest->fingers_nb = imax(nfingers, gest->fingers_nb); gest->zmax = imax(f->p, gest->zmax); /* Do we have enough packets to consider this a gesture? */ if (queue_len < gest->window_min) return; dyp = -1; dxp = -1; /* Is a scrolling action occurring? */ if (!gest->in_taphold && !ms->button && (!gest->in_vscroll || two_finger_scroll)) { /* * A scrolling action must not conflict with a tap * action. Here are the conditions to consider a * scrolling action: * - the action in a configurable area * - one of the following: * . the distance between the last packet and the * first should be above a configurable minimum * . tap timed out */ dxp = abs(x0 - start_x); dyp = abs(y0 - start_y); if (timevalcmp(&sc->lastsoftintr, &gest->taptimeout, >) || dxp >= sc->syninfo.vscroll_min_delta || dyp >= sc->syninfo.vscroll_min_delta) { /* * Handle two finger scrolling. * Note that we don't rely on fingers_nb * as that keeps the maximum number of fingers. */ if (two_finger_scroll) { if (nfingers == 2) { gest->in_vscroll += dyp ? 2 : 0; gest->in_vscroll += dxp ? 1 : 0; } } else { /* Check for horizontal scrolling. */ if ((vscroll_hor_area > 0 && start_y <= vscroll_hor_area) || (vscroll_hor_area < 0 && start_y >= max_y + vscroll_hor_area)) gest->in_vscroll += 2; /* Check for vertical scrolling. */ if ((vscroll_ver_area > 0 && start_x <= vscroll_ver_area) || (vscroll_ver_area < 0 && start_x >= max_x + vscroll_ver_area)) gest->in_vscroll += 1; } /* Avoid conflicts if area overlaps. */ if (gest->in_vscroll >= 3) gest->in_vscroll = (dxp > dyp) ? 2 : 1; } } /* * Reset two finger scrolling when the number of fingers * is different from two or any button is pressed. */ if (two_finger_scroll && gest->in_vscroll != 0 && (nfingers != 2 || ms->button)) gest->in_vscroll = 0; VLOG(5, (LOG_DEBUG, "synaptics: virtual scrolling: %s " "(direction=%d, dxp=%d, dyp=%d, fingers=%d)\n", gest->in_vscroll ? 
"YES" : "NO", gest->in_vscroll, dxp, dyp, gest->fingers_nb)); } else if (sc->flags & PSM_FLAGS_FINGERDOWN) { /* * An action is currently taking place but the pressure * dropped under the minimum, putting an end to it. */ int taphold_timeout, dx, dy, tap_max_delta; dx = abs(smoother->queue[smoother->queue_cursor].x - smoother->start_x); dy = abs(smoother->queue[smoother->queue_cursor].y - smoother->start_y); /* Max delta is disabled for multi-fingers tap. */ if (gest->fingers_nb > 1) tap_max_delta = imax(dx, dy); else tap_max_delta = sc->syninfo.tap_max_delta; sc->flags &= ~PSM_FLAGS_FINGERDOWN; /* Check for tap. */ VLOG(3, (LOG_DEBUG, "synaptics: zmax=%d, dx=%d, dy=%d, " "delta=%d, fingers=%d, queue=%d\n", gest->zmax, dx, dy, tap_max_delta, gest->fingers_nb, smoother->queue_len)); if (!gest->in_vscroll && gest->zmax >= tap_threshold && timevalcmp(&sc->lastsoftintr, &gest->taptimeout, <=) && dx <= tap_max_delta && dy <= tap_max_delta && smoother->queue_len >= sc->syninfo.tap_min_queue) { /* * We have a tap if: * - the maximum pressure went over tap_threshold * - the action ended before tap_timeout * * To handle tap-hold, we must delay any button push to * the next action. */ if (gest->in_taphold) { /* * This is the second and last tap of a * double tap action, not a tap-hold. */ gest->in_taphold = 0; /* * For double-tap to work: * - no button press is emitted (to * simulate a button release) * - PSM_FLAGS_FINGERDOWN is set to * force the next packet to emit a * button press) */ VLOG(2, (LOG_DEBUG, "synaptics: button RELEASE: %d\n", gest->tap_button)); sc->flags |= PSM_FLAGS_FINGERDOWN; /* Schedule button press on next interrupt */ sc->idletimeout.tv_sec = psmhz > 1 ? 0 : 1; sc->idletimeout.tv_usec = psmhz > 1 ? 1000000 / psmhz : 0; } else { /* * This is the first tap: we set the * tap-hold state and notify the button * down event. */ gest->in_taphold = 1; taphold_timeout = sc->syninfo.taphold_timeout; gest->taptimeout.tv_sec = taphold_timeout / 1000000; gest->taptimeout.tv_usec = taphold_timeout % 1000000; sc->idletimeout = gest->taptimeout; timevaladd(&gest->taptimeout, &sc->lastsoftintr); switch (gest->fingers_nb) { case 3: gest->tap_button = MOUSE_BUTTON2DOWN; break; case 2: gest->tap_button = MOUSE_BUTTON3DOWN; break; default: gest->tap_button = MOUSE_BUTTON1DOWN; } VLOG(2, (LOG_DEBUG, "synaptics: button PRESS: %d\n", gest->tap_button)); ms->button |= gest->tap_button; } } else { /* * Not enough pressure or timeout: reset * tap-hold state. */ if (gest->in_taphold) { VLOG(2, (LOG_DEBUG, "synaptics: button RELEASE: %d\n", gest->tap_button)); gest->in_taphold = 0; } else { VLOG(2, (LOG_DEBUG, "synaptics: not a tap-hold\n")); } } } else if (!(sc->flags & PSM_FLAGS_FINGERDOWN) && gest->in_taphold) { /* * For a tap-hold to work, the button must remain down at * least until timeout (where the in_taphold flags will be * cleared) or during the next action. */ if (timevalcmp(&sc->lastsoftintr, &gest->taptimeout, <=)) { ms->button |= gest->tap_button; } else { VLOG(2, (LOG_DEBUG, "synaptics: button RELEASE: %d\n", gest->tap_button)); gest->in_taphold = 0; } } return; } static void psmsmoother(struct psm_softc *sc, finger_t *f, int smoother_id, mousestatus_t *ms, int *x, int *y) { smoother_t *smoother = &sc->smoother[smoother_id]; gesture_t *gest = &(sc->gesture); /* * Check pressure to detect a real wanted action on the * touchpad. 
*/ if (f->p >= sc->syninfo.min_pressure) { int x0, y0; int cursor, peer, window; int dx, dy, dxp, dyp; int max_width, max_pressure; int margin_top, margin_right, margin_bottom, margin_left; int na_top, na_right, na_bottom, na_left; int window_min, window_max; int multiplicator; int weight_current, weight_previous, weight_len_squared; int div_min, div_max, div_len; int vscroll_hor_area, vscroll_ver_area; int two_finger_scroll; int max_x, max_y; int len, weight_prev_x, weight_prev_y; int div_max_x, div_max_y, div_x, div_y; int is_fuzzy; /* Read sysctl. */ /* XXX Verify values? */ max_width = sc->syninfo.max_width; max_pressure = sc->syninfo.max_pressure; margin_top = sc->syninfo.margin_top; margin_right = sc->syninfo.margin_right; margin_bottom = sc->syninfo.margin_bottom; margin_left = sc->syninfo.margin_left; na_top = sc->syninfo.na_top; na_right = sc->syninfo.na_right; na_bottom = sc->syninfo.na_bottom; na_left = sc->syninfo.na_left; window_min = sc->syninfo.window_min; window_max = sc->syninfo.window_max; multiplicator = sc->syninfo.multiplicator; weight_current = sc->syninfo.weight_current; weight_previous = sc->syninfo.weight_previous; weight_len_squared = sc->syninfo.weight_len_squared; div_min = sc->syninfo.div_min; div_max = sc->syninfo.div_max; div_len = sc->syninfo.div_len; vscroll_hor_area = sc->syninfo.vscroll_hor_area; vscroll_ver_area = sc->syninfo.vscroll_ver_area; two_finger_scroll = sc->syninfo.two_finger_scroll; max_x = sc->syninfo.max_x; max_y = sc->syninfo.max_y; is_fuzzy = (f->flags & PSM_FINGER_FUZZY) != 0; /* Read current absolute position. */ x0 = f->x; y0 = f->y; /* * Limit the coordinates to the specified margins because * this area isn't very reliable. */ if (x0 <= margin_left) x0 = margin_left; else if (x0 >= max_x - margin_right) x0 = max_x - margin_right; if (y0 <= margin_bottom) y0 = margin_bottom; else if (y0 >= max_y - margin_top) y0 = max_y - margin_top; /* If the action is just beginning, init the structure. */ if (smoother->active == 0) { VLOG(3, (LOG_DEBUG, "smoother%d: ---\n", smoother_id)); /* Store the first point of this action. */ smoother->start_x = x0; smoother->start_y = y0; dx = dy = 0; /* Initialize queue. */ smoother->queue_cursor = SYNAPTICS_PACKETQUEUE; smoother->queue_len = 0; /* Reset average. */ smoother->avg_dx = 0; smoother->avg_dy = 0; /* Reset squelch. */ smoother->squelch_x = 0; smoother->squelch_y = 0; /* Activate queue */ smoother->active = 1; } else { /* Calculate the current delta. */ cursor = smoother->queue_cursor; dx = x0 - smoother->queue[cursor].x; dy = y0 - smoother->queue[cursor].y; } VLOG(3, (LOG_DEBUG, "smoother%d: ipacket: [%d, %d], %d, %d\n", smoother_id, x0, y0, f->p, f->w)); /* Queue this new packet. */ cursor = SYNAPTICS_QUEUE_CURSOR(smoother->queue_cursor - 1); smoother->queue[cursor].x = x0; smoother->queue[cursor].y = y0; smoother->queue_cursor = cursor; if (smoother->queue_len < SYNAPTICS_PACKETQUEUE) smoother->queue_len++; VLOG(5, (LOG_DEBUG, "smoother%d: cursor[%d]: x=%d, y=%d, dx=%d, dy=%d\n", smoother_id, cursor, x0, y0, dx, dy)); /* Do we have enough packets to consider this a movement? */ if (smoother->queue_len < gest->window_min) return; weight_prev_x = weight_prev_y = weight_previous; div_max_x = div_max_y = div_max; if (gest->in_vscroll) { /* Dividers are different with virtual scrolling. */ div_min = sc->syninfo.vscroll_div_min; div_max_x = div_max_y = sc->syninfo.vscroll_div_max; } else { /* * There's a lot of noise in coordinates when * the finger is on the touchpad's borders. 
When * using this area, we apply a special weight and * div. */ if (x0 <= na_left || x0 >= max_x - na_right) { weight_prev_x = sc->syninfo.weight_previous_na; div_max_x = sc->syninfo.div_max_na; } if (y0 <= na_bottom || y0 >= max_y - na_top) { weight_prev_y = sc->syninfo.weight_previous_na; div_max_y = sc->syninfo.div_max_na; } } /* * Calculate weights for the average operands and * the divisor. Both depend on the distance between * the current packet and a previous one (based on the * window width). */ window = imin(smoother->queue_len, window_max); peer = SYNAPTICS_QUEUE_CURSOR(cursor + window - 1); dxp = abs(x0 - smoother->queue[peer].x) + 1; dyp = abs(y0 - smoother->queue[peer].y) + 1; len = (dxp * dxp) + (dyp * dyp); weight_prev_x = imin(weight_prev_x, weight_len_squared * weight_prev_x / len); weight_prev_y = imin(weight_prev_y, weight_len_squared * weight_prev_y / len); len = (dxp + dyp) / 2; div_x = div_len * div_max_x / len; div_x = imin(div_max_x, div_x); div_x = imax(div_min, div_x); div_y = div_len * div_max_y / len; div_y = imin(div_max_y, div_y); div_y = imax(div_min, div_y); VLOG(3, (LOG_DEBUG, "smoother%d: peer=%d, len=%d, weight=%d/%d, div=%d/%d\n", smoother_id, peer, len, weight_prev_x, weight_prev_y, div_x, div_y)); /* Compute averages. */ smoother->avg_dx = (weight_current * dx * multiplicator + weight_prev_x * smoother->avg_dx) / (weight_current + weight_prev_x); smoother->avg_dy = (weight_current * dy * multiplicator + weight_prev_y * smoother->avg_dy) / (weight_current + weight_prev_y); VLOG(5, (LOG_DEBUG, "smoother%d: avg_dx~=%d, avg_dy~=%d\n", smoother_id, smoother->avg_dx / multiplicator, smoother->avg_dy / multiplicator)); /* Use these averages to calculate x & y. */ smoother->squelch_x += smoother->avg_dx; dxp = smoother->squelch_x / (div_x * multiplicator); smoother->squelch_x = smoother->squelch_x % (div_x * multiplicator); smoother->squelch_y += smoother->avg_dy; dyp = smoother->squelch_y / (div_y * multiplicator); smoother->squelch_y = smoother->squelch_y % (div_y * multiplicator); switch(gest->in_vscroll) { case 0: /* Pointer movement. */ /* On real<->fuzzy finger switch the x/y pos jumps */ if (is_fuzzy == smoother->is_fuzzy) { *x += dxp; *y += dyp; } VLOG(3, (LOG_DEBUG, "smoother%d: [%d, %d] -> [%d, %d]\n", smoother_id, dx, dy, dxp, dyp)); break; case 1: /* Vertical scrolling. */ if (dyp != 0) ms->button |= (dyp > 0) ? MOUSE_BUTTON4DOWN : MOUSE_BUTTON5DOWN; break; case 2: /* Horizontal scrolling. */ if (dxp != 0) ms->button |= (dxp > 0) ? MOUSE_BUTTON7DOWN : MOUSE_BUTTON6DOWN; break; } smoother->is_fuzzy = is_fuzzy; } else { /* * Deactivate queue. Note: We can not just reset queue here * as these values are still used by gesture processor. * So postpone reset till next touch. */ smoother->active = 0; } } static int proc_elantech(struct psm_softc *sc, packetbuf_t *pb, mousestatus_t *ms, int *x, int *y, int *z) { static int touchpad_button, trackpoint_button; finger_t fn, f[ELANTECH_MAX_FINGERS]; int pkt, id, scale, i, nfingers, mask; if (!elantech_support) return (0); /* Determine packet format and do a sanity check for out of sync packets. */ if (ELANTECH_PKT_IS_DEBOUNCE(pb, sc->elanhw.hwversion)) pkt = ELANTECH_PKT_NOP; else if (sc->elanhw.hastrackpoint && ELANTECH_PKT_IS_TRACKPOINT(pb)) pkt = ELANTECH_PKT_TRACKPOINT; else switch (sc->elanhw.hwversion) { case 2: if (!ELANTECH_PKT_IS_V2(pb)) return (-1); pkt = (pb->ipacket[0] & 0xc0) == 0x80 ? 
ELANTECH_PKT_V2_2FINGER : ELANTECH_PKT_V2_COMMON; break; case 3: if (!ELANTECH_PKT_IS_V3_HEAD(pb, sc->elanhw.hascrc) && !ELANTECH_PKT_IS_V3_TAIL(pb, sc->elanhw.hascrc)) return (-1); pkt = ELANTECH_PKT_V3; break; case 4: if (!ELANTECH_PKT_IS_V4(pb, sc->elanhw.hascrc)) return (-1); switch (pb->ipacket[3] & 0x03) { case 0x00: pkt = ELANTECH_PKT_V4_STATUS; break; case 0x01: pkt = ELANTECH_PKT_V4_HEAD; break; case 0x02: pkt = ELANTECH_PKT_V4_MOTION; break; default: return (-1); } break; default: return (-1); } VLOG(5, (LOG_DEBUG, "elantech: ipacket format: %d\n", pkt)); for (id = 0; id < ELANTECH_MAX_FINGERS; id++) PSM_FINGER_RESET(f[id]); *x = *y = *z = 0; ms->button = ms->obutton; if (sc->syninfo.touchpad_off) return (0); /* Common legend * L: Left mouse button pressed * R: Right mouse button pressed * N: number of fingers on touchpad * X: absolute x value (horizontal) * Y: absolute y value (vertical) * W; width of the finger touch * P: pressure */ switch (pkt) { case ELANTECH_PKT_V2_COMMON: /* HW V2. One/Three finger touch */ /* 7 6 5 4 3 2 1 0 (LSB) * ------------------------------------------- * ipacket[0]: N1 N0 W3 W2 . . R L * ipacket[1]: P7 P6 P5 P4 X11 X10 X9 X8 * ipacket[2]: X7 X6 X5 X4 X3 X2 X1 X0 * ipacket[3]: N4 VF W1 W0 . . . B2 * ipacket[4]: P3 P1 P2 P0 Y11 Y10 Y9 Y8 * ipacket[5]: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 * ------------------------------------------- * N4: set if more than 3 fingers (only in 3 fingers mode) * VF: a kind of flag? (only on EF123, 0 when finger * is over one of the buttons, 1 otherwise) * B2: (on EF113 only, 0 otherwise), one button pressed * P & W is not reported on EF113 touchpads */ nfingers = (pb->ipacket[0] & 0xc0) >> 6; if (nfingers == 3 && (pb->ipacket[3] & 0x80)) nfingers = 4; if (nfingers == 0) { mask = (1 << nfingers) - 1; /* = 0x00 */ break; } /* Map 3-rd and 4-th fingers to first finger */ mask = (1 << 1) - 1; /* = 0x01 */ f[0] = ELANTECH_FINGER_SET_XYP(pb); if (sc->elanhw.haspressure) { f[0].w = ((pb->ipacket[0] & 0x30) >> 2) | ((pb->ipacket[3] & 0x30) >> 4); } else { f[0].p = PSM_FINGER_DEFAULT_P; f[0].w = PSM_FINGER_DEFAULT_W; } /* * HW v2 dont report exact finger positions when 3 or more * fingers are on touchpad. */ if (nfingers > 2) f[0].flags = PSM_FINGER_FUZZY; break; case ELANTECH_PKT_V2_2FINGER: /*HW V2. Two finger touch */ /* 7 6 5 4 3 2 1 0 (LSB) * ------------------------------------------- * ipacket[0]: N1 N0 AY8 AX8 . . R L * ipacket[1]: AX7 AX6 AX5 AX4 AX3 AX2 AX1 AX0 * ipacket[2]: AY7 AY6 AY5 AY4 AY3 AY2 AY1 AY0 * ipacket[3]: . . BY8 BX8 . . . . 
* ipacket[4]: BX7 BX6 BX5 BX4 BX3 BX2 BX1 BX0 * ipacket[5]: BY7 BY6 BY5 BY4 BY3 BY2 BY1 BY0 * ------------------------------------------- * AX: lower-left finger absolute x value * AY: lower-left finger absolute y value * BX: upper-right finger absolute x value * BY: upper-right finger absolute y value */ nfingers = 2; mask = (1 << nfingers) - 1; for (id = 0; id < imin(2, ELANTECH_MAX_FINGERS); id ++) f[id] = (finger_t) { .x = (((pb->ipacket[id * 3] & 0x10) << 4) | pb->ipacket[id * 3 + 1]) << 2, .y = (((pb->ipacket[id * 3] & 0x20) << 3) | pb->ipacket[id * 3 + 2]) << 2, .p = PSM_FINGER_DEFAULT_P, .w = PSM_FINGER_DEFAULT_W, /* HW ver.2 sends bounding box */ .flags = PSM_FINGER_FUZZY }; break; case ELANTECH_PKT_V3: /* HW Version 3 */ /* 7 6 5 4 3 2 1 0 (LSB) * ------------------------------------------- * ipacket[0]: N1 N0 W3 W2 0 1 R L * ipacket[1]: P7 P6 P5 P4 X11 X10 X9 X8 * ipacket[2]: X7 X6 X5 X4 X3 X2 X1 X0 * ipacket[3]: 0 0 W1 W0 0 0 1 0 * ipacket[4]: P3 P1 P2 P0 Y11 Y10 Y9 Y8 * ipacket[5]: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 * ------------------------------------------- */ nfingers = (pb->ipacket[0] & 0xc0) >> 6; /* Map 3-rd finger to first finger */ id = nfingers > 2 ? 0 : nfingers - 1; mask = (1 << (id + 1)) - 1; if (nfingers == 0) break; fn = ELANTECH_FINGER_SET_XYP(pb); fn.w = ((pb->ipacket[0] & 0x30) >> 2) | ((pb->ipacket[3] & 0x30) >> 4); /* * HW v3 dont report exact finger positions when 3 or more * fingers are on touchpad. */ if (nfingers > 1) fn.flags = PSM_FINGER_FUZZY; if (nfingers == 2) { if (ELANTECH_PKT_IS_V3_HEAD(pb, sc->elanhw.hascrc)) { sc->elanaction.fingers[0] = fn; return (0); } else f[0] = sc->elanaction.fingers[0]; } f[id] = fn; break; case ELANTECH_PKT_V4_STATUS: /* HW Version 4. Status packet */ /* 7 6 5 4 3 2 1 0 (LSB) * ------------------------------------------- * ipacket[0]: . . . . 0 1 R L * ipacket[1]: . . . F4 F3 F2 F1 F0 * ipacket[2]: . . . . . . . . * ipacket[3]: . . . 1 0 0 0 0 * ipacket[4]: PL . . . . . . . * ipacket[5]: . . . . . . . . * ------------------------------------------- * Fn: finger n is on touchpad * PL: palm * HV ver4 sends a status packet to indicate that the numbers * or identities of the fingers has been changed */ mask = pb->ipacket[1] & 0x1f; nfingers = bitcount(mask); if (sc->elanaction.mask_v4wait != 0) VLOG(3, (LOG_DEBUG, "elantech: HW v4 status packet" " when not all previous head packets received\n")); /* Bitmap of fingers to receive before gesture processing */ sc->elanaction.mask_v4wait = mask & ~sc->elanaction.mask; /* Skip "new finger is on touchpad" packets */ if (sc->elanaction.mask_v4wait) { sc->elanaction.mask = mask; return (0); } break; case ELANTECH_PKT_V4_HEAD: /* HW Version 4. Head packet */ /* 7 6 5 4 3 2 1 0 (LSB) * ------------------------------------------- * ipacket[0]: W3 W2 W1 W0 0 1 R L * ipacket[1]: P7 P6 P5 P4 X11 X10 X9 X8 * ipacket[2]: X7 X6 X5 X4 X3 X2 X1 X0 * ipacket[3]: ID2 ID1 ID0 1 0 0 0 1 * ipacket[4]: P3 P1 P2 P0 Y11 Y10 Y9 Y8 * ipacket[5]: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 * ------------------------------------------- * ID: finger id * HW ver 4 sends head packets in two cases: * 1. One finger touch and movement. * 2. Next after status packet to tell new finger positions. */ mask = sc->elanaction.mask; nfingers = bitcount(mask); id = ((pb->ipacket[3] & 0xe0) >> 5) - 1; fn = ELANTECH_FINGER_SET_XYP(pb); fn.w =(pb->ipacket[0] & 0xf0) >> 4; if (id < 0) return (0); /* Packet is finger position update. 
Report it */ if (sc->elanaction.mask_v4wait == 0) { if (id < ELANTECH_MAX_FINGERS) f[id] = fn; break; } /* Remove finger from waiting bitmap and store into context */ sc->elanaction.mask_v4wait &= ~(1 << id); if (id < ELANTECH_MAX_FINGERS) sc->elanaction.fingers[id] = fn; /* Wait for other fingers if needed */ if (sc->elanaction.mask_v4wait != 0) return (0); /* All new fingers are received. Report them from context */ for (id = 0; id < ELANTECH_MAX_FINGERS; id++) if (sc->elanaction.mask & (1 << id)) f[id] = sc->elanaction.fingers[id]; break; case ELANTECH_PKT_V4_MOTION: /* HW Version 4. Motion packet */ /* 7 6 5 4 3 2 1 0 (LSB) * ------------------------------------------- * ipacket[0]: ID2 ID1 ID0 OF 0 1 R L * ipacket[1]: DX7 DX6 DX5 DX4 DX3 DX2 DX1 DX0 * ipacket[2]: DY7 DY6 DY5 DY4 DY3 DY2 DY1 DY0 * ipacket[3]: ID2 ID1 ID0 1 0 0 1 0 * ipacket[4]: DX7 DX6 DX5 DX4 DX3 DX2 DX1 DX0 * ipacket[5]: DY7 DY6 DY5 DY4 DY3 DY2 DY1 DY0 * ------------------------------------------- * OF: delta overflows (> 127 or < -128), in this case * firmware sends us (delta x / 5) and (delta y / 5) * ID: finger id * DX: delta x (two's complement) * XY: delta y (two's complement) * byte 0 ~ 2 for one finger * byte 3 ~ 5 for another finger */ mask = sc->elanaction.mask; nfingers = bitcount(mask); scale = (pb->ipacket[0] & 0x10) ? 5 : 1; for (i = 0; i <= 3; i += 3) { id = ((pb->ipacket[i] & 0xe0) >> 5) - 1; if (id < 0 || id >= ELANTECH_MAX_FINGERS) continue; if (PSM_FINGER_IS_SET(sc->elanaction.fingers[id])) { f[id] = sc->elanaction.fingers[id]; f[id].x += imax(-f[id].x, (signed char)pb->ipacket[i+1] * scale); f[id].y += imax(-f[id].y, (signed char)pb->ipacket[i+2] * scale); } else { VLOG(3, (LOG_DEBUG, "elantech: " "HW v4 motion packet skipped\n")); } } break; case ELANTECH_PKT_TRACKPOINT: /* 7 6 5 4 3 2 1 0 (LSB) * ------------------------------------------- * ipacket[0]: 0 0 SX SY 0 M R L * ipacket[1]: ~SX 0 0 0 0 0 0 0 * ipacket[2]: ~SY 0 0 0 0 0 0 0 * ipacket[3]: 0 0 ~SY ~SX 0 1 1 0 * ipacket[4]: X7 X6 X5 X4 X3 X2 X1 X0 * ipacket[5]: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 * ------------------------------------------- * X and Y are written in two's complement spread * over 9 bits with SX/SY the relative top bit and * X7..X0 and Y7..Y0 the lower bits. */ *x = (pb->ipacket[0] & 0x20) ? pb->ipacket[4] - 256 : pb->ipacket[4]; *y = (pb->ipacket[0] & 0x10) ? pb->ipacket[5] - 256 : pb->ipacket[5]; trackpoint_button = ((pb->ipacket[0] & 0x01) ? MOUSE_BUTTON1DOWN : 0) | ((pb->ipacket[0] & 0x02) ? MOUSE_BUTTON3DOWN : 0) | ((pb->ipacket[0] & 0x04) ? MOUSE_BUTTON2DOWN : 0); #ifdef EVDEV_SUPPORT evdev_push_rel(sc->evdev_r, REL_X, *x); evdev_push_rel(sc->evdev_r, REL_Y, -*y); evdev_push_mouse_btn(sc->evdev_r, trackpoint_button); evdev_sync(sc->evdev_r); #endif ms->button = touchpad_button | trackpoint_button; return (0); case ELANTECH_PKT_NOP: return (0); default: return (-1); } for (id = 0; id < ELANTECH_MAX_FINGERS; id++) if (PSM_FINGER_IS_SET(f[id])) VLOG(2, (LOG_DEBUG, "elantech: " "finger %d: down [%d, %d], %d, %d, %d\n", id + 1, f[id].x, f[id].y, f[id].p, f[id].w, f[id].flags)); /* Touchpad button presses */ if (sc->elanhw.isclickpad) { touchpad_button = ((pb->ipacket[0] & 0x03) ? MOUSE_BUTTON1DOWN : 0); } else { touchpad_button = ((pb->ipacket[0] & 0x01) ? MOUSE_BUTTON1DOWN : 0) | ((pb->ipacket[0] & 0x02) ? 
MOUSE_BUTTON3DOWN : 0); } #ifdef EVDEV_SUPPORT if (evdev_rcpt_mask & EVDEV_RCPT_HW_MOUSE) { for (id = 0; id < ELANTECH_MAX_FINGERS; id++) { if (PSM_FINGER_IS_SET(f[id])) { psm_push_mt_finger(sc, id, &f[id]); /* Convert touch width to surface units */ evdev_push_abs(sc->evdev_a, ABS_MT_TOUCH_MAJOR, f[id].w * sc->elanhw.dptracex); } if (sc->elanaction.mask & (1 << id) && !(mask & (1 << id))) psm_release_mt_slot(sc->evdev_a, id); } evdev_push_key(sc->evdev_a, BTN_TOUCH, nfingers > 0); evdev_push_nfingers(sc->evdev_a, nfingers); if (nfingers > 0) { if (PSM_FINGER_IS_SET(f[0])) psm_push_st_finger(sc, &f[0]); } else evdev_push_abs(sc->evdev_a, ABS_PRESSURE, 0); evdev_push_mouse_btn(sc->evdev_a, touchpad_button); evdev_sync(sc->evdev_a); } #endif ms->button = touchpad_button | trackpoint_button; /* Send finger 1 position to gesture processor */ if (PSM_FINGER_IS_SET(f[0]) || PSM_FINGER_IS_SET(f[1]) || nfingers == 0) psmgestures(sc, &f[0], imin(nfingers, 3), ms); /* Send fingers positions to movement smoothers */ for (id = 0; id < PSM_FINGERS; id++) if (PSM_FINGER_IS_SET(f[id]) || !(mask & (1 << id))) psmsmoother(sc, &f[id], id, ms, x, y); /* Store current finger positions in action context */ for (id = 0; id < ELANTECH_MAX_FINGERS; id++) { if (PSM_FINGER_IS_SET(f[id])) sc->elanaction.fingers[id] = f[id]; if ((sc->elanaction.mask & (1 << id)) && !(mask & (1 << id))) PSM_FINGER_RESET(sc->elanaction.fingers[id]); } sc->elanaction.mask = mask; /* Palm detection doesn't terminate the current action. */ if (psmpalmdetect(sc, &f[0], nfingers)) { *x = *y = *z = 0; ms->button = ms->obutton; return (0); } /* Use the extra buttons as a scrollwheel */ if (ms->button & MOUSE_BUTTON4DOWN) *z = -1; else if (ms->button & MOUSE_BUTTON5DOWN) *z = 1; else if (ms->button & MOUSE_BUTTON6DOWN) *z = -2; else if (ms->button & MOUSE_BUTTON7DOWN) *z = 2; else *z = 0; ms->button &= ~(MOUSE_BUTTON4DOWN | MOUSE_BUTTON5DOWN | MOUSE_BUTTON6DOWN | MOUSE_BUTTON7DOWN); return (0); } static void proc_versapad(struct psm_softc *sc, packetbuf_t *pb, mousestatus_t *ms, int *x, int *y, int *z) { static int butmap_versapad[8] = { 0, MOUSE_BUTTON3DOWN, 0, MOUSE_BUTTON3DOWN, MOUSE_BUTTON1DOWN, MOUSE_BUTTON1DOWN | MOUSE_BUTTON3DOWN, MOUSE_BUTTON1DOWN, MOUSE_BUTTON1DOWN | MOUSE_BUTTON3DOWN }; int c, x0, y0; /* VersaPad PS/2 absolute mode message format * * [packet1] 7 6 5 4 3 2 1 0(LSB) * ipacket[0]: 1 1 0 A 1 L T R * ipacket[1]: H7 H6 H5 H4 H3 H2 H1 H0 * ipacket[2]: V7 V6 V5 V4 V3 V2 V1 V0 * ipacket[3]: 1 1 1 A 1 L T R * ipacket[4]:V11 V10 V9 V8 H11 H10 H9 H8 * ipacket[5]: 0 P6 P5 P4 P3 P2 P1 P0 * * [note] * R: right physical mouse button (1=on) * T: touch pad virtual button (1=tapping) * L: left physical mouse button (1=on) * A: position data is valid (1=valid) * H: horizontal data (12bit signed integer. H11 is sign bit.) * V: vertical data (12bit signed integer. V11 is sign bit.) * P: pressure data * * Tapping is mapped to MOUSE_BUTTON4. */ c = pb->ipacket[0]; *x = *y = 0; ms->button = butmap_versapad[c & MOUSE_PS2VERSA_BUTTONS]; ms->button |= (c & MOUSE_PS2VERSA_TAP) ? 
MOUSE_BUTTON4DOWN : 0; if (c & MOUSE_PS2VERSA_IN_USE) { x0 = pb->ipacket[1] | (((pb->ipacket[4]) & 0x0f) << 8); y0 = pb->ipacket[2] | (((pb->ipacket[4]) & 0xf0) << 4); if (x0 & 0x800) x0 -= 0x1000; if (y0 & 0x800) y0 -= 0x1000; if (sc->flags & PSM_FLAGS_FINGERDOWN) { *x = sc->xold - x0; *y = y0 - sc->yold; if (*x < 0) /* XXX */ ++*x; else if (*x) --*x; if (*y < 0) ++*y; else if (*y) --*y; } else sc->flags |= PSM_FLAGS_FINGERDOWN; sc->xold = x0; sc->yold = y0; } else sc->flags &= ~PSM_FLAGS_FINGERDOWN; } static void psmsoftintridle(void *arg) { struct psm_softc *sc = arg; packetbuf_t *pb; /* Invoke soft handler only when pqueue is empty. Otherwise it will be * invoked from psmintr soon with pqueue filled with real data */ if (sc->pqueue_start == sc->pqueue_end && sc->idlepacket.inputbytes > 0) { /* Grow circular queue backwards to avoid race with psmintr */ if (--sc->pqueue_start < 0) sc->pqueue_start = PSM_PACKETQUEUE - 1; pb = &sc->pqueue[sc->pqueue_start]; memcpy(pb, &sc->idlepacket, sizeof(packetbuf_t)); VLOG(4, (LOG_DEBUG, "psmsoftintridle: %02x %02x %02x %02x %02x %02x\n", pb->ipacket[0], pb->ipacket[1], pb->ipacket[2], pb->ipacket[3], pb->ipacket[4], pb->ipacket[5])); psmsoftintr(arg); } } static void psmsoftintr(void *arg) { /* * the table to turn PS/2 mouse button bits (MOUSE_PS2_BUTTON?DOWN) * into `mousestatus' button bits (MOUSE_BUTTON?DOWN). */ static int butmap[8] = { 0, MOUSE_BUTTON1DOWN, MOUSE_BUTTON3DOWN, MOUSE_BUTTON1DOWN | MOUSE_BUTTON3DOWN, MOUSE_BUTTON2DOWN, MOUSE_BUTTON1DOWN | MOUSE_BUTTON2DOWN, MOUSE_BUTTON2DOWN | MOUSE_BUTTON3DOWN, MOUSE_BUTTON1DOWN | MOUSE_BUTTON2DOWN | MOUSE_BUTTON3DOWN }; struct psm_softc *sc = arg; mousestatus_t ms; packetbuf_t *pb; int x, y, z, c, l, s; getmicrouptime(&sc->lastsoftintr); s = spltty(); do { pb = &sc->pqueue[sc->pqueue_start]; if (sc->mode.level == PSM_LEVEL_NATIVE) goto next_native; c = pb->ipacket[0]; /* * A kludge for Kensington device! * The MSB of the horizontal count appears to be stored in * a strange place. */ if (sc->hw.model == MOUSE_MODEL_THINK) pb->ipacket[1] |= (c & MOUSE_PS2_XOVERFLOW) ? 0x80 : 0; /* ignore the overflow bits... */ x = (c & MOUSE_PS2_XNEG) ? pb->ipacket[1] - 256 : pb->ipacket[1]; y = (c & MOUSE_PS2_YNEG) ? pb->ipacket[2] - 256 : pb->ipacket[2]; z = 0; ms.obutton = sc->button; /* previous button state */ ms.button = butmap[c & MOUSE_PS2_BUTTONS]; /* `tapping' action */ if (sc->config & PSM_CONFIG_FORCETAP) ms.button |= ((c & MOUSE_PS2_TAP)) ? 0 : MOUSE_BUTTON4DOWN; timevalclear(&sc->idletimeout); sc->idlepacket.inputbytes = 0; switch (sc->hw.model) { case MOUSE_MODEL_EXPLORER: /* * b7 b6 b5 b4 b3 b2 b1 b0 * byte 1: oy ox sy sx 1 M R L * byte 2: x x x x x x x x * byte 3: y y y y y y y y * byte 4: * * S2 S1 s d2 d1 d0 * * L, M, R, S1, S2: left, middle, right and side buttons * s: wheel data sign bit * d2-d0: wheel data */ z = (pb->ipacket[3] & MOUSE_EXPLORER_ZNEG) ? (pb->ipacket[3] & 0x0f) - 16 : (pb->ipacket[3] & 0x0f); ms.button |= (pb->ipacket[3] & MOUSE_EXPLORER_BUTTON4DOWN) ? MOUSE_BUTTON4DOWN : 0; ms.button |= (pb->ipacket[3] & MOUSE_EXPLORER_BUTTON5DOWN) ? MOUSE_BUTTON5DOWN : 0; break; case MOUSE_MODEL_INTELLI: case MOUSE_MODEL_NET: /* wheel data is in the fourth byte */ z = (char)pb->ipacket[3]; /* * XXX some mice may send 7 when there is no Z movement? */ if ((z >= 7) || (z <= -7)) z = 0; /* some compatible mice have additional buttons */ ms.button |= (c & MOUSE_PS2INTELLI_BUTTON4DOWN) ? MOUSE_BUTTON4DOWN : 0; ms.button |= (c & MOUSE_PS2INTELLI_BUTTON5DOWN) ? 
MOUSE_BUTTON5DOWN : 0; break; case MOUSE_MODEL_MOUSEMANPLUS: proc_mmanplus(sc, pb, &ms, &x, &y, &z); break; case MOUSE_MODEL_GLIDEPOINT: /* `tapping' action */ ms.button |= ((c & MOUSE_PS2_TAP)) ? 0 : MOUSE_BUTTON4DOWN; break; case MOUSE_MODEL_NETSCROLL: /* * three additional bytes encode buttons and * wheel events */ ms.button |= (pb->ipacket[3] & MOUSE_PS2_BUTTON3DOWN) ? MOUSE_BUTTON4DOWN : 0; ms.button |= (pb->ipacket[3] & MOUSE_PS2_BUTTON1DOWN) ? MOUSE_BUTTON5DOWN : 0; z = (pb->ipacket[3] & MOUSE_PS2_XNEG) ? pb->ipacket[4] - 256 : pb->ipacket[4]; break; case MOUSE_MODEL_THINK: /* the fourth button state in the first byte */ ms.button |= (c & MOUSE_PS2_TAP) ? MOUSE_BUTTON4DOWN : 0; break; case MOUSE_MODEL_VERSAPAD: proc_versapad(sc, pb, &ms, &x, &y, &z); c = ((x < 0) ? MOUSE_PS2_XNEG : 0) | ((y < 0) ? MOUSE_PS2_YNEG : 0); break; case MOUSE_MODEL_4D: /* * b7 b6 b5 b4 b3 b2 b1 b0 * byte 1: s2 d2 s1 d1 1 M R L * byte 2: sx x x x x x x x * byte 3: sy y y y y y y y * * s1: wheel 1 direction * d1: wheel 1 data * s2: wheel 2 direction * d2: wheel 2 data */ x = (pb->ipacket[1] & 0x80) ? pb->ipacket[1] - 256 : pb->ipacket[1]; y = (pb->ipacket[2] & 0x80) ? pb->ipacket[2] - 256 : pb->ipacket[2]; switch (c & MOUSE_4D_WHEELBITS) { case 0x10: z = 1; break; case 0x30: z = -1; break; case 0x40: /* XXX 2nd wheel turning right */ z = 2; break; case 0xc0: /* XXX 2nd wheel turning left */ z = -2; break; } break; case MOUSE_MODEL_4DPLUS: if ((x < 16 - 256) && (y < 16 - 256)) { /* * b7 b6 b5 b4 b3 b2 b1 b0 * byte 1: 0 0 1 1 1 M R L * byte 2: 0 0 0 0 1 0 0 0 * byte 3: 0 0 0 0 S s d1 d0 * * L, M, R, S: left, middle, right, * and side buttons * s: wheel data sign bit * d1-d0: wheel data */ x = y = 0; if (pb->ipacket[2] & MOUSE_4DPLUS_BUTTON4DOWN) ms.button |= MOUSE_BUTTON4DOWN; z = (pb->ipacket[2] & MOUSE_4DPLUS_ZNEG) ? 
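/*
 * Illustrative aside (editor's sketch, not part of the original change):
 * the 4D+ wheel field decoded just below is a 3-bit two's-complement value
 * whose sign bit is MOUSE_4DPLUS_ZNEG, hence the (d & 0x07) - 8 form for
 * negative values.  Equivalent stand-alone sketch; the 0x04 sign mask is an
 * assumption read off the "S s d1 d0" layout comment above, the driver
 * itself uses the MOUSE_4DPLUS_ZNEG constant:
 *
 *	static int
 *	wheel_3bit(int b)			// b is ipacket[2]
 *	{
 *		int d = b & 0x07;		// 3-bit field, bit 2 = sign
 *		return ((b & 0x04) ? d - 8 : d);	// 4..7 -> -4..-1
 *	}
 */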
((pb->ipacket[2] & 0x07) - 8) : (pb->ipacket[2] & 0x07) ; } else { /* preserve previous button states */ ms.button |= ms.obutton & MOUSE_EXTBUTTONS; } break; case MOUSE_MODEL_SYNAPTICS: + if (pb->inputbytes == MOUSE_PS2_PACKETSIZE) + if (proc_synaptics_mux(sc, pb)) + goto next; + if (proc_synaptics(sc, pb, &ms, &x, &y, &z) != 0) { VLOG(3, (LOG_DEBUG, "synaptics: " "packet rejected\n")); goto next; } break; case MOUSE_MODEL_ELANTECH: if (proc_elantech(sc, pb, &ms, &x, &y, &z) != 0) { VLOG(3, (LOG_DEBUG, "elantech: " "packet rejected\n")); goto next; } break; case MOUSE_MODEL_TRACKPOINT: case MOUSE_MODEL_GENERIC: default: break; } #ifdef EVDEV_SUPPORT if (evdev_rcpt_mask & EVDEV_RCPT_HW_MOUSE && sc->hw.model != MOUSE_MODEL_ELANTECH && sc->hw.model != MOUSE_MODEL_SYNAPTICS) { evdev_push_rel(sc->evdev_r, REL_X, x); evdev_push_rel(sc->evdev_r, REL_Y, -y); switch (sc->hw.model) { case MOUSE_MODEL_EXPLORER: case MOUSE_MODEL_INTELLI: case MOUSE_MODEL_NET: case MOUSE_MODEL_NETSCROLL: case MOUSE_MODEL_4DPLUS: evdev_push_rel(sc->evdev_r, REL_WHEEL, -z); break; case MOUSE_MODEL_MOUSEMANPLUS: case MOUSE_MODEL_4D: switch (z) { case 1: case -1: evdev_push_rel(sc->evdev_r, REL_WHEEL, -z); break; case 2: case -2: evdev_push_rel(sc->evdev_r, REL_HWHEEL, z / 2); break; } break; } evdev_push_mouse_btn(sc->evdev_r, ms.button); evdev_sync(sc->evdev_r); } #endif /* scale values */ if (sc->mode.accelfactor >= 1) { if (x != 0) { x = x * x / sc->mode.accelfactor; if (x == 0) x = 1; if (c & MOUSE_PS2_XNEG) x = -x; } if (y != 0) { y = y * y / sc->mode.accelfactor; if (y == 0) y = 1; if (c & MOUSE_PS2_YNEG) y = -y; } } /* Store last packet for reinjection if it has not been set already */ if (timevalisset(&sc->idletimeout) && sc->idlepacket.inputbytes == 0) sc->idlepacket = *pb; ms.dx = x; ms.dy = y; ms.dz = z; ms.flags = ((x || y || z) ? 
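/*
 * Illustrative aside (editor's sketch, not part of the original change):
 * the "scale values" step above implements simple quadratic acceleration,
 * x' = x*x / accelfactor, clamped so that non-zero input never rounds down
 * to zero, with the sign restored afterwards from the XNEG/YNEG bits.
 * Stand-alone sketch (hypothetical helper; the sign is taken from the
 * input delta here rather than from the status byte):
 *
 *	static int
 *	accel(int delta, int factor)	// factor >= 1
 *	{
 *		int v = delta * delta / factor;
 *		if (delta != 0 && v == 0)
 *			v = 1;		// never lose motion entirely
 *		return (delta < 0 ? -v : v);
 *	}
 */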
MOUSE_POSCHANGED : 0) | (ms.obutton ^ ms.button); pb->inputbytes = tame_mouse(sc, pb, &ms, pb->ipacket); sc->status.flags |= ms.flags; sc->status.dx += ms.dx; sc->status.dy += ms.dy; sc->status.dz += ms.dz; sc->status.button = ms.button; sc->button = ms.button; next_native: sc->watchdog = FALSE; /* queue data */ if (sc->queue.count + pb->inputbytes < sizeof(sc->queue.buf)) { l = imin(pb->inputbytes, sizeof(sc->queue.buf) - sc->queue.tail); bcopy(&pb->ipacket[0], &sc->queue.buf[sc->queue.tail], l); if (pb->inputbytes > l) bcopy(&pb->ipacket[l], &sc->queue.buf[0], pb->inputbytes - l); sc->queue.tail = (sc->queue.tail + pb->inputbytes) % sizeof(sc->queue.buf); sc->queue.count += pb->inputbytes; } next: pb->inputbytes = 0; if (++sc->pqueue_start >= PSM_PACKETQUEUE) sc->pqueue_start = 0; } while (sc->pqueue_start != sc->pqueue_end); if (sc->state & PSM_ASLP) { sc->state &= ~PSM_ASLP; wakeup(sc); } selwakeuppri(&sc->rsel, PZERO); if (sc->async != NULL) { pgsigio(&sc->async, SIGIO, 0); } sc->state &= ~PSM_SOFTARMED; /* schedule injection of predefined packet after idletimeout * if no data packets have been received from psmintr */ if (timevalisset(&sc->idletimeout)) { sc->state |= PSM_SOFTARMED; callout_reset(&sc->softcallout, tvtohz(&sc->idletimeout), psmsoftintridle, sc); VLOG(2, (LOG_DEBUG, "softintr: callout set: %d ticks\n", tvtohz(&sc->idletimeout))); } splx(s); } static int psmpoll(struct cdev *dev, int events, struct thread *td) { struct psm_softc *sc = dev->si_drv1; int s; int revents = 0; /* Return true if a mouse event available */ s = spltty(); if (events & (POLLIN | POLLRDNORM)) { if (sc->queue.count > 0) revents |= events & (POLLIN | POLLRDNORM); else selrecord(td, &sc->rsel); } splx(s); return (revents); } /* vendor/model specific routines */ static int mouse_id_proc1(KBDC kbdc, int res, int scale, int *status) { if (set_mouse_resolution(kbdc, res) != res) return (FALSE); if (set_mouse_scaling(kbdc, scale) && set_mouse_scaling(kbdc, scale) && set_mouse_scaling(kbdc, scale) && (get_mouse_status(kbdc, status, 0, 3) >= 3)) return (TRUE); return (FALSE); } static int mouse_ext_command(KBDC kbdc, int command) { int c; c = (command >> 6) & 0x03; if (set_mouse_resolution(kbdc, c) != c) return (FALSE); c = (command >> 4) & 0x03; if (set_mouse_resolution(kbdc, c) != c) return (FALSE); c = (command >> 2) & 0x03; if (set_mouse_resolution(kbdc, c) != c) return (FALSE); c = (command >> 0) & 0x03; if (set_mouse_resolution(kbdc, c) != c) return (FALSE); return (TRUE); } #ifdef notyet /* Logitech MouseMan Cordless II */ static int enable_lcordless(struct psm_softc *sc, enum probearg arg) { KBDC kbdc = sc->kbdc; int status[3]; int ch; if (!mouse_id_proc1(kbdc, PSMD_RES_HIGH, 2, status)) return (FALSE); if (status[1] == PSMD_RES_HIGH) return (FALSE); ch = (status[0] & 0x07) - 1; /* channel # */ if ((ch <= 0) || (ch > 4)) return (FALSE); /* * status[1]: always one? * status[2]: battery status? (0-100) */ return (TRUE); } #endif /* notyet */ /* Genius NetScroll Mouse, MouseSystems SmartScroll Mouse */ static int enable_groller(struct psm_softc *sc, enum probearg arg) { KBDC kbdc = sc->kbdc; int status[3]; /* * The special sequence to enable the fourth button and the * roller. Immediately after this sequence check status bytes. * if the mouse is NetScroll, the second and the third bytes are * '3' and 'D'. */ /* * If the mouse is an ordinary PS/2 mouse, the status bytes should * look like the following. 
* * byte 1 bit 7 always 0 * bit 6 stream mode (0) * bit 5 disabled (0) * bit 4 1:1 scaling (0) * bit 3 always 0 * bit 0-2 button status * byte 2 resolution (PSMD_RES_HIGH) * byte 3 report rate (?) */ if (!mouse_id_proc1(kbdc, PSMD_RES_HIGH, 1, status)) return (FALSE); if ((status[1] != '3') || (status[2] != 'D')) return (FALSE); /* FIXME: SmartScroll Mouse has 5 buttons! XXX */ if (arg == PROBE) sc->hw.buttons = 4; return (TRUE); } /* Genius NetMouse/NetMouse Pro, ASCII Mie Mouse, NetScroll Optical */ static int enable_gmouse(struct psm_softc *sc, enum probearg arg) { KBDC kbdc = sc->kbdc; int status[3]; /* * The special sequence to enable the middle, "rubber" button. * Immediately after this sequence check status bytes. * If the mouse is NetMouse, NetMouse Pro, or ASCII MIE Mouse, * the second and the third bytes are '3' and 'U'. * NOTE: NetMouse reports that it has three buttons although it has * two buttons and a rubber button. NetMouse Pro and MIE Mouse * say they have three buttons too and they do have a button on the * side... */ if (!mouse_id_proc1(kbdc, PSMD_RES_HIGH, 1, status)) return (FALSE); if ((status[1] != '3') || (status[2] != 'U')) return (FALSE); return (TRUE); } /* ALPS GlidePoint */ static int enable_aglide(struct psm_softc *sc, enum probearg arg) { KBDC kbdc = sc->kbdc; int status[3]; /* * The special sequence to obtain ALPS GlidePoint specific * information. Immediately after this sequence, status bytes will * contain something interesting. * NOTE: ALPS produces several models of GlidePoint. Some of those * do not respond to this sequence, thus, cannot be detected this way. */ if (set_mouse_sampling_rate(kbdc, 100) != 100) return (FALSE); if (!mouse_id_proc1(kbdc, PSMD_RES_LOW, 2, status)) return (FALSE); if ((status[1] == PSMD_RES_LOW) || (status[2] == 100)) return (FALSE); return (TRUE); } /* Kensington ThinkingMouse/Trackball */ static int enable_kmouse(struct psm_softc *sc, enum probearg arg) { static u_char rate[] = { 20, 60, 40, 20, 20, 60, 40, 20, 20 }; KBDC kbdc = sc->kbdc; int status[3]; int id1; int id2; int i; id1 = get_aux_id(kbdc); if (set_mouse_sampling_rate(kbdc, 10) != 10) return (FALSE); /* * The device is now in the native mode? It returns a different * ID value... */ id2 = get_aux_id(kbdc); if ((id1 == id2) || (id2 != 2)) return (FALSE); if (set_mouse_resolution(kbdc, PSMD_RES_LOW) != PSMD_RES_LOW) return (FALSE); #if PSM_DEBUG >= 2 /* at this point, resolution is LOW, sampling rate is 10/sec */ if (get_mouse_status(kbdc, status, 0, 3) < 3) return (FALSE); #endif /* * The special sequence to enable the third and fourth buttons. * Otherwise they behave like the first and second buttons. */ for (i = 0; i < nitems(rate); ++i) if (set_mouse_sampling_rate(kbdc, rate[i]) != rate[i]) return (FALSE); /* * At this point, the device is using default resolution and * sampling rate for the native mode. */ if (get_mouse_status(kbdc, status, 0, 3) < 3) return (FALSE); if ((status[1] == PSMD_RES_LOW) || (status[2] == rate[i - 1])) return (FALSE); /* the device appears to be enabled by this sequence, disable it for now */ disable_aux_dev(kbdc); empty_aux_buffer(kbdc, 5); return (TRUE); } /* Logitech MouseMan+/FirstMouse+, IBM ScrollPoint Mouse */ static int enable_mmanplus(struct psm_softc *sc, enum probearg arg) { KBDC kbdc = sc->kbdc; int data[3]; /* the special sequence to enable the fourth button and the roller.
*/ /* * NOTE: for ScrollPoint to respond correctly, the SET_RESOLUTION * must be called exactly three times since the last RESET command * before this sequence. XXX */ if (!set_mouse_scaling(kbdc, 1)) return (FALSE); if (!mouse_ext_command(kbdc, 0x39) || !mouse_ext_command(kbdc, 0xdb)) return (FALSE); if (get_mouse_status(kbdc, data, 1, 3) < 3) return (FALSE); /* * PS2++ protocol, packet type 0 * * b7 b6 b5 b4 b3 b2 b1 b0 * byte 1: * 1 p3 p2 1 * * * * byte 2: 1 1 p1 p0 m1 m0 1 0 * byte 3: m7 m6 m5 m4 m3 m2 m1 m0 * * p3-p0: packet type: 0 * m7-m0: model ID: MouseMan+:0x50, * FirstMouse+:0x51, * ScrollPoint:0x58... */ /* check constant bits */ if ((data[0] & MOUSE_PS2PLUS_SYNCMASK) != MOUSE_PS2PLUS_SYNC) return (FALSE); if ((data[1] & 0xc3) != 0xc2) return (FALSE); /* check d3-d0 in byte 2 */ if (!MOUSE_PS2PLUS_CHECKBITS(data)) return (FALSE); /* check p3-p0 */ if (MOUSE_PS2PLUS_PACKET_TYPE(data) != 0) return (FALSE); if (arg == PROBE) { sc->hw.hwid &= 0x00ff; sc->hw.hwid |= data[2] << 8; /* save model ID */ } /* * MouseMan+ (or FirstMouse+) is now in its native mode, in which * the wheel and the fourth button events are encoded in the * special data packet. The mouse may be put in the IntelliMouse mode * if it is initialized by the IntelliMouse's method. */ return (TRUE); } /* MS IntelliMouse Explorer */ static int enable_msexplorer(struct psm_softc *sc, enum probearg arg) { KBDC kbdc = sc->kbdc; static u_char rate0[] = { 200, 100, 80, }; static u_char rate1[] = { 200, 200, 80, }; int id; int i; /* * This is needed for at least A4Tech X-7xx mice - they do not go * straight to Explorer mode, but need to be set to Intelli mode * first. */ enable_msintelli(sc, arg); /* the special sequence to enable the extra buttons and the roller. */ for (i = 0; i < nitems(rate1); ++i) if (set_mouse_sampling_rate(kbdc, rate1[i]) != rate1[i]) return (FALSE); /* the device will give the genuine ID only after the above sequence */ id = get_aux_id(kbdc); if (id != PSM_EXPLORER_ID) return (FALSE); if (arg == PROBE) { sc->hw.buttons = 5; /* IntelliMouse Explorer XXX */ sc->hw.hwid = id; } /* * XXX: this is a kludge to fool some KVM switch products * which think they are clever enough to know the 4-byte IntelliMouse * protocol, and assume any other protocols use 3-byte packets. * They don't convey 4-byte data packets from the IntelliMouse Explorer * correctly to the host computer because of this! * The following sequence is actually IntelliMouse's "wake up" * sequence; it will make the KVM think the mouse is IntelliMouse * when it is in fact IntelliMouse Explorer. */ for (i = 0; i < nitems(rate0); ++i) if (set_mouse_sampling_rate(kbdc, rate0[i]) != rate0[i]) break; get_aux_id(kbdc); return (TRUE); } /* * MS IntelliMouse * Logitech MouseMan+ and FirstMouse+ will also respond to this * probe routine and act like IntelliMouse. */ static int enable_msintelli(struct psm_softc *sc, enum probearg arg) { KBDC kbdc = sc->kbdc; static u_char rate[] = { 200, 100, 80, }; int id; int i; /* the special sequence to enable the third button and the roller. */ for (i = 0; i < nitems(rate); ++i) if (set_mouse_sampling_rate(kbdc, rate[i]) != rate[i]) return (FALSE); /* the device will give the genuine ID only after the above sequence */ id = get_aux_id(kbdc); if (id != PSM_INTELLI_ID) return (FALSE); if (arg == PROBE) { sc->hw.buttons = 3; sc->hw.hwid = id; } return (TRUE); } /* * A4 Tech 4D Mouse * Newer wheel mice from A4 Tech may use the 4D+ protocol. 
*/ static int enable_4dmouse(struct psm_softc *sc, enum probearg arg) { static u_char rate[] = { 200, 100, 80, 60, 40, 20 }; KBDC kbdc = sc->kbdc; int id; int i; for (i = 0; i < nitems(rate); ++i) if (set_mouse_sampling_rate(kbdc, rate[i]) != rate[i]) return (FALSE); id = get_aux_id(kbdc); /* * WinEasy 4D, 4 Way Scroll 4D: 6 * Cable-Free 4D: 8 (4DPLUS) * WinBest 4D+, 4 Way Scroll 4D+: 8 (4DPLUS) */ if (id != PSM_4DMOUSE_ID) return (FALSE); if (arg == PROBE) { sc->hw.buttons = 3; /* XXX some 4D mice have 4? */ sc->hw.hwid = id; } return (TRUE); } /* * A4 Tech 4D+ Mouse * Newer wheel mice from A4 Tech seem to use this protocol. * Older models are recognized as either 4D Mouse or IntelliMouse. */ static int enable_4dplus(struct psm_softc *sc, enum probearg arg) { KBDC kbdc = sc->kbdc; int id; /* * enable_4dmouse() already issued the following ID sequence... static u_char rate[] = { 200, 100, 80, 60, 40, 20 }; int i; for (i = 0; i < sizeof(rate)/sizeof(rate[0]); ++i) if (set_mouse_sampling_rate(kbdc, rate[i]) != rate[i]) return (FALSE); */ id = get_aux_id(kbdc); switch (id) { case PSM_4DPLUS_ID: break; case PSM_4DPLUS_RFSW35_ID: break; default: return (FALSE); } if (arg == PROBE) { sc->hw.buttons = (id == PSM_4DPLUS_ID) ? 4 : 3; sc->hw.hwid = id; } return (TRUE); } /* Synaptics Touchpad */ static int synaptics_sysctl(SYSCTL_HANDLER_ARGS) { struct psm_softc *sc; int error, arg; if (oidp->oid_arg1 == NULL || oidp->oid_arg2 < 0 || oidp->oid_arg2 > SYNAPTICS_SYSCTL_SOFTBUTTON3_X) return (EINVAL); sc = oidp->oid_arg1; /* Read the current value. */ arg = *(int *)((char *)sc + oidp->oid_arg2); error = sysctl_handle_int(oidp, &arg, 0, req); /* Sanity check. */ if (error || !req->newptr) return (error); /* * Check that the new value is in the concerned node's range * of values. 
*/ switch (oidp->oid_arg2) { case SYNAPTICS_SYSCTL_MIN_PRESSURE: case SYNAPTICS_SYSCTL_MAX_PRESSURE: if (arg < 0 || arg > 255) return (EINVAL); break; case SYNAPTICS_SYSCTL_MAX_WIDTH: if (arg < 4 || arg > 15) return (EINVAL); break; case SYNAPTICS_SYSCTL_MARGIN_TOP: case SYNAPTICS_SYSCTL_MARGIN_BOTTOM: case SYNAPTICS_SYSCTL_NA_TOP: case SYNAPTICS_SYSCTL_NA_BOTTOM: if (arg < 0 || arg > sc->synhw.maximumYCoord) return (EINVAL); break; case SYNAPTICS_SYSCTL_SOFTBUTTON2_X: case SYNAPTICS_SYSCTL_SOFTBUTTON3_X: /* Softbuttons are a clickpad-only feature */ if (!sc->synhw.capClickPad && arg != 0) return (EINVAL); /* FALLTHROUGH */ case SYNAPTICS_SYSCTL_MARGIN_RIGHT: case SYNAPTICS_SYSCTL_MARGIN_LEFT: case SYNAPTICS_SYSCTL_NA_RIGHT: case SYNAPTICS_SYSCTL_NA_LEFT: if (arg < 0 || arg > sc->synhw.maximumXCoord) return (EINVAL); break; case SYNAPTICS_SYSCTL_WINDOW_MIN: case SYNAPTICS_SYSCTL_WINDOW_MAX: case SYNAPTICS_SYSCTL_TAP_MIN_QUEUE: if (arg < 1 || arg > SYNAPTICS_PACKETQUEUE) return (EINVAL); break; case SYNAPTICS_SYSCTL_MULTIPLICATOR: case SYNAPTICS_SYSCTL_WEIGHT_CURRENT: case SYNAPTICS_SYSCTL_WEIGHT_PREVIOUS: case SYNAPTICS_SYSCTL_WEIGHT_PREVIOUS_NA: case SYNAPTICS_SYSCTL_WEIGHT_LEN_SQUARED: case SYNAPTICS_SYSCTL_DIV_MIN: case SYNAPTICS_SYSCTL_DIV_MAX: case SYNAPTICS_SYSCTL_DIV_MAX_NA: case SYNAPTICS_SYSCTL_DIV_LEN: case SYNAPTICS_SYSCTL_VSCROLL_DIV_MIN: case SYNAPTICS_SYSCTL_VSCROLL_DIV_MAX: if (arg < 1) return (EINVAL); break; case SYNAPTICS_SYSCTL_TAP_MAX_DELTA: case SYNAPTICS_SYSCTL_TAPHOLD_TIMEOUT: case SYNAPTICS_SYSCTL_VSCROLL_MIN_DELTA: if (arg < 0) return (EINVAL); break; case SYNAPTICS_SYSCTL_VSCROLL_HOR_AREA: if (arg < -sc->synhw.maximumXCoord || arg > sc->synhw.maximumXCoord) return (EINVAL); break; case SYNAPTICS_SYSCTL_SOFTBUTTONS_Y: /* Softbuttons are a clickpad-only feature */ if (!sc->synhw.capClickPad && arg != 0) return (EINVAL); /* FALLTHROUGH */ case SYNAPTICS_SYSCTL_VSCROLL_VER_AREA: if (arg < -sc->synhw.maximumYCoord || arg > sc->synhw.maximumYCoord) return (EINVAL); break; case SYNAPTICS_SYSCTL_TOUCHPAD_OFF: if (arg < 0 || arg > 1) return (EINVAL); break; default: return (EINVAL); } /* Update. */ *(int *)((char *)sc + oidp->oid_arg2) = arg; return (error); } static void synaptics_sysctl_create_softbuttons_tree(struct psm_softc *sc) { /* * Set predefined sizes for softbuttons.
* Values are taken to match HP Pavilion dv6 clickpad drawings * with a thin middle softbutton placed on the separator */ /* hw.psm.synaptics.softbuttons_y */ sc->syninfo.softbuttons_y = 1700; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "softbuttons_y", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_SOFTBUTTONS_Y, synaptics_sysctl, "I", "Vertical size of softbuttons area"); /* hw.psm.synaptics.softbutton2_x */ sc->syninfo.softbutton2_x = 3100; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "softbutton2_x", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_SOFTBUTTON2_X, synaptics_sysctl, "I", "Horizontal position of 2nd softbutton left edge (0-disable)"); /* hw.psm.synaptics.softbutton3_x */ sc->syninfo.softbutton3_x = 3900; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "softbutton3_x", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_SOFTBUTTON3_X, synaptics_sysctl, "I", "Horizontal position of 3rd softbutton left edge (0-disable)"); } static void synaptics_sysctl_create_tree(struct psm_softc *sc, const char *name, const char *descr) { if (sc->syninfo.sysctl_tree != NULL) return; /* Attach extra synaptics sysctl nodes under hw.psm.synaptics */ sysctl_ctx_init(&sc->syninfo.sysctl_ctx); sc->syninfo.sysctl_tree = SYSCTL_ADD_NODE(&sc->syninfo.sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_psm), OID_AUTO, name, CTLFLAG_RD, 0, descr); /* hw.psm.synaptics.directional_scrolls. */ sc->syninfo.directional_scrolls = 0; SYSCTL_ADD_INT(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "directional_scrolls", CTLFLAG_RW|CTLFLAG_ANYBODY, &sc->syninfo.directional_scrolls, 0, "Enable hardware scrolling pad (if non-zero) or register it as " "extended buttons (if 0)"); /* hw.psm.synaptics.max_x. */ sc->syninfo.max_x = 6143; SYSCTL_ADD_INT(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "max_x", CTLFLAG_RD|CTLFLAG_ANYBODY, &sc->syninfo.max_x, 0, "Horizontal reporting range"); /* hw.psm.synaptics.max_y. */ sc->syninfo.max_y = 6143; SYSCTL_ADD_INT(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "max_y", CTLFLAG_RD|CTLFLAG_ANYBODY, &sc->syninfo.max_y, 0, "Vertical reporting range"); /* * Turn off two finger scroll if we have a * physical area reserved for scrolling or when * there's no multi finger support. */ if (sc->synhw.verticalScroll || (sc->synhw.capMultiFinger == 0 && sc->synhw.capAdvancedGestures == 0)) sc->syninfo.two_finger_scroll = 0; else sc->syninfo.two_finger_scroll = 1; /* hw.psm.synaptics.two_finger_scroll. */ SYSCTL_ADD_INT(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "two_finger_scroll", CTLFLAG_RW|CTLFLAG_ANYBODY, &sc->syninfo.two_finger_scroll, 0, "Enable two finger scrolling"); /* hw.psm.synaptics.min_pressure. */ sc->syninfo.min_pressure = 32; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "min_pressure", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_MIN_PRESSURE, synaptics_sysctl, "I", "Minimum pressure required to start an action"); /* hw.psm.synaptics.max_pressure.
*/ sc->syninfo.max_pressure = 220; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "max_pressure", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_MAX_PRESSURE, synaptics_sysctl, "I", "Maximum pressure to detect palm"); /* hw.psm.synaptics.max_width. */ sc->syninfo.max_width = 10; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "max_width", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_MAX_WIDTH, synaptics_sysctl, "I", "Maximum finger width to detect palm"); /* hw.psm.synaptics.top_margin. */ sc->syninfo.margin_top = 200; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "margin_top", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_MARGIN_TOP, synaptics_sysctl, "I", "Top margin"); /* hw.psm.synaptics.right_margin. */ sc->syninfo.margin_right = 200; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "margin_right", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_MARGIN_RIGHT, synaptics_sysctl, "I", "Right margin"); /* hw.psm.synaptics.bottom_margin. */ sc->syninfo.margin_bottom = 200; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "margin_bottom", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_MARGIN_BOTTOM, synaptics_sysctl, "I", "Bottom margin"); /* hw.psm.synaptics.left_margin. */ sc->syninfo.margin_left = 200; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "margin_left", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_MARGIN_LEFT, synaptics_sysctl, "I", "Left margin"); /* hw.psm.synaptics.na_top. */ sc->syninfo.na_top = 1783; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "na_top", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_NA_TOP, synaptics_sysctl, "I", "Top noisy area, where weight_previous_na is used instead " "of weight_previous"); /* hw.psm.synaptics.na_right. */ sc->syninfo.na_right = 563; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "na_right", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_NA_RIGHT, synaptics_sysctl, "I", "Right noisy area, where weight_previous_na is used instead " "of weight_previous"); /* hw.psm.synaptics.na_bottom. */ sc->syninfo.na_bottom = 1408; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "na_bottom", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_NA_BOTTOM, synaptics_sysctl, "I", "Bottom noisy area, where weight_previous_na is used instead " "of weight_previous"); /* hw.psm.synaptics.na_left. */ sc->syninfo.na_left = 1600; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "na_left", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_NA_LEFT, synaptics_sysctl, "I", "Left noisy area, where weight_previous_na is used instead " "of weight_previous"); /* hw.psm.synaptics.window_min. */ sc->syninfo.window_min = 4; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "window_min", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_WINDOW_MIN, synaptics_sysctl, "I", "Minimum window size to start an action"); /* hw.psm.synaptics.window_max. 
*/ sc->syninfo.window_max = 10; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "window_max", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_WINDOW_MAX, synaptics_sysctl, "I", "Maximum window size"); /* hw.psm.synaptics.multiplicator. */ sc->syninfo.multiplicator = 10000; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "multiplicator", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_MULTIPLICATOR, synaptics_sysctl, "I", "Multiplicator to increase precision in averages and divisions"); /* hw.psm.synaptics.weight_current. */ sc->syninfo.weight_current = 3; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "weight_current", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_WEIGHT_CURRENT, synaptics_sysctl, "I", "Weight of the current movement in the new average"); /* hw.psm.synaptics.weight_previous. */ sc->syninfo.weight_previous = 6; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "weight_previous", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_WEIGHT_PREVIOUS, synaptics_sysctl, "I", "Weight of the previous average"); /* hw.psm.synaptics.weight_previous_na. */ sc->syninfo.weight_previous_na = 20; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "weight_previous_na", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_WEIGHT_PREVIOUS_NA, synaptics_sysctl, "I", "Weight of the previous average (inside the noisy area)"); /* hw.psm.synaptics.weight_len_squared. */ sc->syninfo.weight_len_squared = 2000; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "weight_len_squared", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_WEIGHT_LEN_SQUARED, synaptics_sysctl, "I", "Length (squared) of segments where weight_previous " "starts to decrease"); /* hw.psm.synaptics.div_min. */ sc->syninfo.div_min = 9; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "div_min", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_DIV_MIN, synaptics_sysctl, "I", "Divisor for fast movements"); /* hw.psm.synaptics.div_max. */ sc->syninfo.div_max = 17; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "div_max", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_DIV_MAX, synaptics_sysctl, "I", "Divisor for slow movements"); /* hw.psm.synaptics.div_max_na. */ sc->syninfo.div_max_na = 30; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "div_max_na", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_DIV_MAX_NA, synaptics_sysctl, "I", "Divisor with slow movements (inside the noisy area)"); /* hw.psm.synaptics.div_len. */ sc->syninfo.div_len = 100; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "div_len", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_DIV_LEN, synaptics_sysctl, "I", "Length of segments where div_max starts to decrease"); /* hw.psm.synaptics.tap_max_delta. 
*/ sc->syninfo.tap_max_delta = 80; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "tap_max_delta", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_TAP_MAX_DELTA, synaptics_sysctl, "I", "Length of segments above which a tap is ignored"); /* hw.psm.synaptics.tap_min_queue. */ sc->syninfo.tap_min_queue = 2; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "tap_min_queue", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_TAP_MIN_QUEUE, synaptics_sysctl, "I", "Number of packets required to consider a tap"); /* hw.psm.synaptics.taphold_timeout. */ sc->gesture.in_taphold = 0; sc->syninfo.taphold_timeout = tap_timeout; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "taphold_timeout", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_TAPHOLD_TIMEOUT, synaptics_sysctl, "I", "Maximum elapsed time between two taps to consider a tap-hold " "action"); /* hw.psm.synaptics.vscroll_hor_area. */ sc->syninfo.vscroll_hor_area = 0; /* 1300 */ SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "vscroll_hor_area", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_VSCROLL_HOR_AREA, synaptics_sysctl, "I", "Area reserved for horizontal virtual scrolling"); /* hw.psm.synaptics.vscroll_ver_area. */ sc->syninfo.vscroll_ver_area = -400 - sc->syninfo.margin_right; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "vscroll_ver_area", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_VSCROLL_VER_AREA, synaptics_sysctl, "I", "Area reserved for vertical virtual scrolling"); /* hw.psm.synaptics.vscroll_min_delta. */ sc->syninfo.vscroll_min_delta = 50; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "vscroll_min_delta", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_VSCROLL_MIN_DELTA, synaptics_sysctl, "I", "Minimum movement to consider virtual scrolling"); /* hw.psm.synaptics.vscroll_div_min. */ sc->syninfo.vscroll_div_min = 100; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "vscroll_div_min", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_VSCROLL_DIV_MIN, synaptics_sysctl, "I", "Divisor for fast scrolling"); /* hw.psm.synaptics.vscroll_div_max. */ sc->syninfo.vscroll_div_max = 150; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "vscroll_div_max", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_VSCROLL_DIV_MAX, synaptics_sysctl, "I", "Divisor for slow scrolling"); /* hw.psm.synaptics.touchpad_off. */ sc->syninfo.touchpad_off = 0; SYSCTL_ADD_PROC(&sc->syninfo.sysctl_ctx, SYSCTL_CHILDREN(sc->syninfo.sysctl_tree), OID_AUTO, "touchpad_off", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, SYNAPTICS_SYSCTL_TOUCHPAD_OFF, synaptics_sysctl, "I", "Turn off touchpad"); sc->syninfo.softbuttons_y = 0; sc->syninfo.softbutton2_x = 0; sc->syninfo.softbutton3_x = 0; /* skip the softbuttons sysctl on non-clickpads */ if (sc->synhw.capClickPad) synaptics_sysctl_create_softbuttons_tree(sc); } static int synaptics_preferred_mode(struct psm_softc *sc) { int mode_byte; /* Check if we are in relative mode */ if (sc->hw.model != MOUSE_MODEL_SYNAPTICS) { if (tap_enabled == 0) /* * Disable tap & drag gestures. We use a Mode Byte * and set the DisGest bit (see §2.5 of Synaptics * TouchPad Interfacing Guide).
*/ return (0x04); else /* * Enable tap & drag gestures. We use a Mode Byte * and clear the DisGest bit (see §2.5 of Synaptics * TouchPad Interfacing Guide). */ return (0x00); } mode_byte = 0xc4; /* request wmode where available */ if (sc->synhw.capExtended) mode_byte |= 1; return mode_byte; } static void synaptics_set_mode(struct psm_softc *sc, int mode_byte) { mouse_ext_command(sc->kbdc, mode_byte); /* "Commit" the Set Mode Byte command sent above. */ set_mouse_sampling_rate(sc->kbdc, 20); /* * Enable advanced gestures mode if supported and we are not entering * passthrough or relative mode. */ if ((sc->synhw.capAdvancedGestures || sc->synhw.capReportsV) && sc->hw.model == MOUSE_MODEL_SYNAPTICS && !(mode_byte & (1 << 5))) { mouse_ext_command(sc->kbdc, 3); set_mouse_sampling_rate(sc->kbdc, 0xc8); } } +/* + * AUX MUX detection code should be placed at very beginning of probe sequence + * at least before 4-byte protocol mouse probes e.g. MS IntelliMouse probe as + * latter can trigger switching the MUX to incompatible state. + */ static int +enable_synaptics_mux(struct psm_softc *sc, enum probearg arg) +{ + KBDC kbdc = sc->kbdc; + int port, version; + int probe = FALSE; + int active_ports_count = 0; + int active_ports_mask = 0; + + version = enable_aux_mux(kbdc); + if (version == -1) + return (FALSE); + + for (port = 0; port < KBDC_AUX_MUX_NUM_PORTS; port++) { + VLOG(3, (LOG_DEBUG, "aux_mux: ping port %d\n", port)); + set_active_aux_mux_port(kbdc, port); + if (enable_aux_dev(kbdc) && disable_aux_dev(kbdc)) { + active_ports_count++; + active_ports_mask |= 1 << port; + } + } + + if (verbose >= 2) + printf("Active Multiplexing PS/2 controller v%d.%d with %d " + "active port(s)\n", version >> 4 & 0x0f, version & 0x0f, + active_ports_count); + + /* psm has a special support for GenMouse + SynTouchpad combination */ + if (active_ports_count >= 2) { + for (port = 0; port < KBDC_AUX_MUX_NUM_PORTS; port++) { + if ((active_ports_mask & 1 << port) == 0) + continue; + VLOG(3, (LOG_DEBUG, "aux_mux: probe port %d\n", port)); + set_active_aux_mux_port(kbdc, port); + probe = enable_synaptics(sc, arg); + if (probe) { + if (arg == PROBE) + sc->muxport = port; + break; + } + } + } + + /* IRQ handler does not support active multiplexing mode */ + disable_aux_mux(kbdc); + + return (probe); +} + +static int enable_synaptics(struct psm_softc *sc, enum probearg arg) { device_t psmcpnp; struct psmcpnp_softc *psmcpnp_sc; KBDC kbdc = sc->kbdc; synapticshw_t synhw; int status[3]; - int buttons, middle_byte; + int buttons; VLOG(3, (LOG_DEBUG, "synaptics: BEGIN init\n")); /* * Just to be on the safe side: this avoids troubles with * following mouse_ext_command() when the previous command * was PSMC_SET_RESOLUTION. Set Scaling has no effect on * Synaptics Touchpad behaviour. */ set_mouse_scaling(kbdc, 1); /* Identify the Touchpad version. */ if (mouse_ext_command(kbdc, 0) == 0) return (FALSE); if (get_mouse_status(kbdc, status, 0, 3) != 3) return (FALSE); - middle_byte = status[1]; - if (middle_byte != 0x46 && middle_byte != 0x47) + if (status[1] != 0x47) return (FALSE); bzero(&synhw, sizeof(synhw)); synhw.infoMinor = status[0]; synhw.infoMajor = status[2] & 0x0f; if (verbose >= 2) printf("Synaptics Touchpad v%d.%d\n", synhw.infoMajor, synhw.infoMinor); - /* - * Most synaptics touchpads return 0x47 in middle byte in responce to - * identify command as stated in p.4.4 of "Synaptics PS/2 TouchPad - * Interfacing Guide" and we only support v4.0 or better. 
But some - * devices return 0x46 here and have a different numbering scheme. - * In the case of 0x46, we allow versions as low as v2.0 - */ - if ((middle_byte == 0x47 && synhw.infoMajor < 4) || - (middle_byte == 0x46 && synhw.infoMajor < 2)) { + if (synhw.infoMajor < 4) { printf(" Unsupported (pre-v4) Touchpad detected\n"); return (FALSE); } /* Get the Touchpad model information. */ if (mouse_ext_command(kbdc, 3) == 0) return (FALSE); if (get_mouse_status(kbdc, status, 0, 3) != 3) return (FALSE); if ((status[1] & 0x01) != 0) { printf(" Failed to read model information\n"); return (FALSE); } synhw.infoRot180 = (status[0] & 0x80) != 0; synhw.infoPortrait = (status[0] & 0x40) != 0; synhw.infoSensor = status[0] & 0x3f; synhw.infoHardware = (status[1] & 0xfe) >> 1; synhw.infoNewAbs = (status[2] & 0x80) != 0; synhw.capPen = (status[2] & 0x40) != 0; synhw.infoSimplC = (status[2] & 0x20) != 0; synhw.infoGeometry = status[2] & 0x0f; if (verbose >= 2) { printf(" Model information:\n"); printf(" infoRot180: %d\n", synhw.infoRot180); printf(" infoPortrait: %d\n", synhw.infoPortrait); printf(" infoSensor: %d\n", synhw.infoSensor); printf(" infoHardware: %d\n", synhw.infoHardware); printf(" infoNewAbs: %d\n", synhw.infoNewAbs); printf(" capPen: %d\n", synhw.capPen); printf(" infoSimplC: %d\n", synhw.infoSimplC); printf(" infoGeometry: %d\n", synhw.infoGeometry); } /* Read the extended capability bits. */ if (mouse_ext_command(kbdc, 2) == 0) return (FALSE); if (get_mouse_status(kbdc, status, 0, 3) != 3) return (FALSE); - if (!SYNAPTICS_VERSION_GE(synhw, 7, 5) && status[1] != middle_byte) { + if (!SYNAPTICS_VERSION_GE(synhw, 7, 5) && status[1] != 0x47) { printf(" Failed to read extended capability bits\n"); return (FALSE); } psmcpnp = devclass_get_device(devclass_find(PSMCPNP_DRIVER_NAME), sc->unit); psmcpnp_sc = (psmcpnp != NULL) ? device_get_softc(psmcpnp) : NULL; - /* - * Set conservative defaults for 0x46 middle byte touchpads - * as ExtendedQueries return bogus data. - */ - if (middle_byte == 0x46) { - synhw.capExtended = 1; - synhw.capPalmDetect = 1; - synhw.capPassthrough = 1; - synhw.capMultiFinger = 1; - synhw.maximumXCoord = SYNAPTICS_DEFAULT_MAX_X; - synhw.maximumYCoord = SYNAPTICS_DEFAULT_MAX_Y; - synhw.minimumXCoord = SYNAPTICS_DEFAULT_MIN_X; - synhw.minimumYCoord = SYNAPTICS_DEFAULT_MIN_Y; - /* Enable multitouch mode for HW v8.1 devices */ - if (psmcpnp_sc != NULL && - psmcpnp_sc->type == PSMCPNP_HPSYN81) - synhw.capReportsV = 1; - } else - synhw.capExtended = (status[0] & 0x80) != 0; - /* Set the different capabilities when they exist. 
*/ buttons = 0; - if (synhw.capExtended && middle_byte == 0x47) { + synhw.capExtended = (status[0] & 0x80) != 0; + if (synhw.capExtended) { synhw.nExtendedQueries = (status[0] & 0x70) >> 4; synhw.capMiddle = (status[0] & 0x04) != 0; synhw.capPassthrough = (status[2] & 0x80) != 0; synhw.capLowPower = (status[2] & 0x40) != 0; synhw.capMultiFingerReport = (status[2] & 0x20) != 0; synhw.capSleep = (status[2] & 0x10) != 0; synhw.capFourButtons = (status[2] & 0x08) != 0; synhw.capBallistics = (status[2] & 0x04) != 0; synhw.capMultiFinger = (status[2] & 0x02) != 0; synhw.capPalmDetect = (status[2] & 0x01) != 0; if (!set_mouse_scaling(kbdc, 1)) return (FALSE); if (mouse_ext_command(kbdc, 0x08) == 0) return (FALSE); if (get_mouse_status(kbdc, status, 0, 3) != 3) return (FALSE); if (status[0] != 0 && (status[1] & 0x80) && status[2] != 0) { synhw.infoXupmm = status[0]; synhw.infoYupmm = status[2]; } if (verbose >= 2) { printf(" Extended capabilities:\n"); printf(" capExtended: %d\n", synhw.capExtended); printf(" capMiddle: %d\n", synhw.capMiddle); printf(" nExtendedQueries: %d\n", synhw.nExtendedQueries); printf(" capPassthrough: %d\n", synhw.capPassthrough); printf(" capLowPower: %d\n", synhw.capLowPower); printf(" capMultiFingerReport: %d\n", synhw.capMultiFingerReport); printf(" capSleep: %d\n", synhw.capSleep); printf(" capFourButtons: %d\n", synhw.capFourButtons); printf(" capBallistics: %d\n", synhw.capBallistics); printf(" capMultiFinger: %d\n", synhw.capMultiFinger); printf(" capPalmDetect: %d\n", synhw.capPalmDetect); printf(" infoXupmm: %d\n", synhw.infoXupmm); printf(" infoYupmm: %d\n", synhw.infoYupmm); } /* * If nExtendedQueries is 1 or greater, then the TouchPad * supports this number of extended queries. We can load * more information about buttons using query 0x09. */ if (synhw.nExtendedQueries >= 1) { if (!set_mouse_scaling(kbdc, 1)) return (FALSE); if (mouse_ext_command(kbdc, 0x09) == 0) return (FALSE); if (get_mouse_status(kbdc, status, 0, 3) != 3) return (FALSE); synhw.verticalScroll = (status[0] & 0x01) != 0; synhw.horizontalScroll = (status[0] & 0x02) != 0; synhw.verticalWheel = (status[0] & 0x08) != 0; synhw.nExtendedButtons = (status[1] & 0xf0) >> 4; synhw.capEWmode = (status[0] & 0x04) != 0; if (verbose >= 2) { printf(" Extended model ID:\n"); printf(" verticalScroll: %d\n", synhw.verticalScroll); printf(" horizontalScroll: %d\n", synhw.horizontalScroll); printf(" verticalWheel: %d\n", synhw.verticalWheel); printf(" nExtendedButtons: %d\n", synhw.nExtendedButtons); printf(" capEWmode: %d\n", synhw.capEWmode); } /* * Add the number of extended buttons to the total * button support count, including the middle button * if capMiddle support bit is set. */ buttons = synhw.nExtendedButtons + synhw.capMiddle; } else /* * If the capFourButtons support bit is set, * add a fourth button to the total button count. */ buttons = synhw.capFourButtons ? 1 : 0; /* Read the continued capabilities bits. 
*/ if (synhw.nExtendedQueries >= 4) { if (!set_mouse_scaling(kbdc, 1)) return (FALSE); if (mouse_ext_command(kbdc, 0x0c) == 0) return (FALSE); if (get_mouse_status(kbdc, status, 0, 3) != 3) return (FALSE); synhw.capClickPad = (status[1] & 0x01) << 1; synhw.capClickPad |= (status[0] & 0x10) != 0; synhw.capDeluxeLEDs = (status[1] & 0x02) != 0; synhw.noAbsoluteFilter = (status[1] & 0x04) != 0; synhw.capReportsV = (status[1] & 0x08) != 0; synhw.capUniformClickPad = (status[1] & 0x10) != 0; synhw.capReportsMin = (status[1] & 0x20) != 0; synhw.capInterTouch = (status[1] & 0x40) != 0; synhw.capReportsMax = (status[0] & 0x02) != 0; synhw.capClearPad = (status[0] & 0x04) != 0; synhw.capAdvancedGestures = (status[0] & 0x08) != 0; synhw.capCoveredPad = (status[0] & 0x80) != 0; if (synhw.capReportsMax) { if (!set_mouse_scaling(kbdc, 1)) return (FALSE); if (mouse_ext_command(kbdc, 0x0d) == 0) return (FALSE); if (get_mouse_status(kbdc, status, 0, 3) != 3) return (FALSE); synhw.maximumXCoord = (status[0] << 5) | ((status[1] & 0x0f) << 1); synhw.maximumYCoord = (status[2] << 5) | ((status[1] & 0xf0) >> 3); } else { - synhw.maximumXCoord = SYNAPTICS_DEFAULT_MAX_X; - synhw.maximumYCoord = SYNAPTICS_DEFAULT_MAX_Y; + /* + * Typical bezel limits. Taken from 'Synaptics + * PS/2 * TouchPad Interfacing Guide' p.3.2.3. + */ + synhw.maximumXCoord = 5472; + synhw.maximumYCoord = 4448; } if (synhw.capReportsMin) { if (!set_mouse_scaling(kbdc, 1)) return (FALSE); if (mouse_ext_command(kbdc, 0x0f) == 0) return (FALSE); if (get_mouse_status(kbdc, status, 0, 3) != 3) return (FALSE); synhw.minimumXCoord = (status[0] << 5) | ((status[1] & 0x0f) << 1); synhw.minimumYCoord = (status[2] << 5) | ((status[1] & 0xf0) >> 3); } else { - synhw.minimumXCoord = SYNAPTICS_DEFAULT_MIN_X; - synhw.minimumYCoord = SYNAPTICS_DEFAULT_MIN_Y; + /* + * Typical bezel limits. Taken from 'Synaptics + * PS/2 * TouchPad Interfacing Guide' p.3.2.3. + */ + synhw.minimumXCoord = 1472; + synhw.minimumYCoord = 1408; } /* * ClickPad properties are not exported through PS/2 * protocol. Detection is based on controller's PnP ID. */ if (synhw.capClickPad && psmcpnp_sc != NULL) { switch (psmcpnp_sc->type) { case PSMCPNP_FORCEPAD: synhw.forcePad = 1; break; default: break; } } if (verbose >= 2) { printf(" Continued capabilities:\n"); printf(" capClickPad: %d\n", synhw.capClickPad); printf(" capDeluxeLEDs: %d\n", synhw.capDeluxeLEDs); printf(" noAbsoluteFilter: %d\n", synhw.noAbsoluteFilter); printf(" capReportsV: %d\n", synhw.capReportsV); printf(" capUniformClickPad: %d\n", synhw.capUniformClickPad); printf(" capReportsMin: %d\n", synhw.capReportsMin); printf(" capInterTouch: %d\n", synhw.capInterTouch); printf(" capReportsMax: %d\n", synhw.capReportsMax); printf(" capClearPad: %d\n", synhw.capClearPad); printf(" capAdvancedGestures: %d\n", synhw.capAdvancedGestures); printf(" capCoveredPad: %d\n", synhw.capCoveredPad); if (synhw.capReportsMax) { printf(" maximumXCoord: %d\n", synhw.maximumXCoord); printf(" maximumYCoord: %d\n", synhw.maximumYCoord); } if (synhw.capReportsMin) { printf(" minimumXCoord: %d\n", synhw.minimumXCoord); printf(" minimumYCoord: %d\n", synhw.minimumYCoord); } if (synhw.capClickPad) { printf(" forcePad: %d\n", synhw.forcePad); } } buttons += synhw.capClickPad; } } if (verbose >= 2) { if (synhw.capExtended) printf(" Additional Buttons: %d\n", buttons); else printf(" No extended capabilities\n"); } /* * Add the default number of 3 buttons to the total * count of supported buttons reported above. 
*/ buttons += 3; /* * Read the mode byte. * * XXX: Note the Synaptics documentation also defines the first * byte of the response to this query to be a constant 0x3b, this * does not appear to be true for Touchpads with guest devices. */ if (mouse_ext_command(kbdc, 1) == 0) return (FALSE); if (get_mouse_status(kbdc, status, 0, 3) != 3) return (FALSE); - if (!SYNAPTICS_VERSION_GE(synhw, 7, 5) && status[1] != middle_byte) { + if (!SYNAPTICS_VERSION_GE(synhw, 7, 5) && status[1] != 0x47) { printf(" Failed to read mode byte\n"); return (FALSE); } if (arg == PROBE) sc->synhw = synhw; if (!synaptics_support) return (FALSE); /* Set mouse type just now for synaptics_set_mode() */ sc->hw.model = MOUSE_MODEL_SYNAPTICS; synaptics_set_mode(sc, synaptics_preferred_mode(sc)); if (trackpoint_support && synhw.capPassthrough) { enable_trackpoint(sc, arg); } VLOG(3, (LOG_DEBUG, "synaptics: END init (%d buttons)\n", buttons)); if (arg == PROBE) { /* Create sysctl tree. */ synaptics_sysctl_create_tree(sc, "synaptics", "Synaptics TouchPad"); sc->hw.buttons = buttons; } return (TRUE); } static void synaptics_passthrough_on(struct psm_softc *sc) { VLOG(2, (LOG_NOTICE, "psm: setting pass-through mode.\n")); synaptics_set_mode(sc, synaptics_preferred_mode(sc) | (1 << 5)); } static void synaptics_passthrough_off(struct psm_softc *sc) { VLOG(2, (LOG_NOTICE, "psm: turning pass-through mode off.\n")); set_mouse_scaling(sc->kbdc, 2); set_mouse_scaling(sc->kbdc, 1); synaptics_set_mode(sc, synaptics_preferred_mode(sc)); } /* IBM/Lenovo TrackPoint */ static int trackpoint_command(struct psm_softc *sc, int cmd, int loc, int val) { const int seq[] = { 0xe2, cmd, loc, val }; int i; if (sc->synhw.capPassthrough) synaptics_passthrough_on(sc); for (i = 0; i < nitems(seq); i++) { if (sc->synhw.capPassthrough && (seq[i] == 0xff || seq[i] == 0xe7)) if (send_aux_command(sc->kbdc, 0xe7) != PSM_ACK) { synaptics_passthrough_off(sc); return (EIO); } if (send_aux_command(sc->kbdc, seq[i]) != PSM_ACK) { if (sc->synhw.capPassthrough) synaptics_passthrough_off(sc); return (EIO); } } if (sc->synhw.capPassthrough) synaptics_passthrough_off(sc); return (0); } #define PSM_TPINFO(x) offsetof(struct psm_softc, tpinfo.x) #define TPMASK 0 #define TPLOC 1 #define TPINFO 2 static int trackpoint_sysctl(SYSCTL_HANDLER_ARGS) { static const int data[][3] = { { 0x00, 0x4a, PSM_TPINFO(sensitivity) }, { 0x00, 0x4d, PSM_TPINFO(inertia) }, { 0x00, 0x60, PSM_TPINFO(uplateau) }, { 0x00, 0x57, PSM_TPINFO(reach) }, { 0x00, 0x58, PSM_TPINFO(draghys) }, { 0x00, 0x59, PSM_TPINFO(mindrag) }, { 0x00, 0x5a, PSM_TPINFO(upthresh) }, { 0x00, 0x5c, PSM_TPINFO(threshold) }, { 0x00, 0x5d, PSM_TPINFO(jenks) }, { 0x00, 0x5e, PSM_TPINFO(ztime) }, { 0x01, 0x2c, PSM_TPINFO(pts) }, { 0x08, 0x2d, PSM_TPINFO(skipback) } }; struct psm_softc *sc; int error, newval, *oldvalp; const int *tp; if (arg1 == NULL || arg2 < 0 || arg2 >= nitems(data)) return (EINVAL); sc = arg1; tp = data[arg2]; oldvalp = (int *)((intptr_t)sc + tp[TPINFO]); newval = *oldvalp; error = sysctl_handle_int(oidp, &newval, 0, req); if (error != 0) return (error); if (newval == *oldvalp) return (0); if (newval < 0 || newval > (tp[TPMASK] == 0 ? 255 : 1)) return (EINVAL); error = trackpoint_command(sc, tp[TPMASK] == 0 ? 0x81 : 0x47, tp[TPLOC], tp[TPMASK] == 0 ? 
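/*
 * Illustrative aside (editor's sketch, not part of the original change):
 * TrackPoint tuning is done by writing controller RAM through the 4-byte
 * escape sequence { 0xe2, cmd, location, value } that trackpoint_command()
 * assembles above: cmd 0x81 writes a whole byte, while cmd 0x47 sets the
 * bits given in the mask.  E.g., forcing sensitivity (RAM location 0x4a)
 * back to its 0x80 default with the existing helper:
 *
 *	if (trackpoint_command(sc, 0x81, 0x4a, 0x80) != 0)
 *		;	// helper returns EIO if any byte is not ACKed
 */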
newval : tp[TPMASK]); if (error != 0) return (error); *oldvalp = newval; return (0); } static void trackpoint_sysctl_create_tree(struct psm_softc *sc) { if (sc->tpinfo.sysctl_tree != NULL) return; /* Attach extra trackpoint sysctl nodes under hw.psm.trackpoint */ sysctl_ctx_init(&sc->tpinfo.sysctl_ctx); sc->tpinfo.sysctl_tree = SYSCTL_ADD_NODE(&sc->tpinfo.sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_psm), OID_AUTO, "trackpoint", CTLFLAG_RD, 0, "IBM/Lenovo TrackPoint"); /* hw.psm.trackpoint.sensitivity */ sc->tpinfo.sensitivity = 0x80; SYSCTL_ADD_PROC(&sc->tpinfo.sysctl_ctx, SYSCTL_CHILDREN(sc->tpinfo.sysctl_tree), OID_AUTO, "sensitivity", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, TRACKPOINT_SYSCTL_SENSITIVITY, trackpoint_sysctl, "I", "Sensitivity"); /* hw.psm.trackpoint.negative_inertia */ sc->tpinfo.inertia = 0x06; SYSCTL_ADD_PROC(&sc->tpinfo.sysctl_ctx, SYSCTL_CHILDREN(sc->tpinfo.sysctl_tree), OID_AUTO, "negative_inertia", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, TRACKPOINT_SYSCTL_NEGATIVE_INERTIA, trackpoint_sysctl, "I", "Negative inertia factor"); /* hw.psm.trackpoint.upper_plateau */ sc->tpinfo.uplateau = 0x61; SYSCTL_ADD_PROC(&sc->tpinfo.sysctl_ctx, SYSCTL_CHILDREN(sc->tpinfo.sysctl_tree), OID_AUTO, "upper_plateau", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, TRACKPOINT_SYSCTL_UPPER_PLATEAU, trackpoint_sysctl, "I", "Transfer function upper plateau speed"); /* hw.psm.trackpoint.backup_range */ sc->tpinfo.reach = 0x0a; SYSCTL_ADD_PROC(&sc->tpinfo.sysctl_ctx, SYSCTL_CHILDREN(sc->tpinfo.sysctl_tree), OID_AUTO, "backup_range", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, TRACKPOINT_SYSCTL_BACKUP_RANGE, trackpoint_sysctl, "I", "Backup range"); /* hw.psm.trackpoint.drag_hysteresis */ sc->tpinfo.draghys = 0xff; SYSCTL_ADD_PROC(&sc->tpinfo.sysctl_ctx, SYSCTL_CHILDREN(sc->tpinfo.sysctl_tree), OID_AUTO, "drag_hysteresis", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, TRACKPOINT_SYSCTL_DRAG_HYSTERESIS, trackpoint_sysctl, "I", "Drag hysteresis"); /* hw.psm.trackpoint.minimum_drag */ sc->tpinfo.mindrag = 0x14; SYSCTL_ADD_PROC(&sc->tpinfo.sysctl_ctx, SYSCTL_CHILDREN(sc->tpinfo.sysctl_tree), OID_AUTO, "minimum_drag", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, TRACKPOINT_SYSCTL_MINIMUM_DRAG, trackpoint_sysctl, "I", "Minimum drag"); /* hw.psm.trackpoint.up_threshold */ sc->tpinfo.upthresh = 0xff; SYSCTL_ADD_PROC(&sc->tpinfo.sysctl_ctx, SYSCTL_CHILDREN(sc->tpinfo.sysctl_tree), OID_AUTO, "up_threshold", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, TRACKPOINT_SYSCTL_UP_THRESHOLD, trackpoint_sysctl, "I", "Up threshold for release"); /* hw.psm.trackpoint.threshold */ sc->tpinfo.threshold = 0x08; SYSCTL_ADD_PROC(&sc->tpinfo.sysctl_ctx, SYSCTL_CHILDREN(sc->tpinfo.sysctl_tree), OID_AUTO, "threshold", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, TRACKPOINT_SYSCTL_THRESHOLD, trackpoint_sysctl, "I", "Threshold"); /* hw.psm.trackpoint.jenks_curvature */ sc->tpinfo.jenks = 0x87; SYSCTL_ADD_PROC(&sc->tpinfo.sysctl_ctx, SYSCTL_CHILDREN(sc->tpinfo.sysctl_tree), OID_AUTO, "jenks_curvature", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, TRACKPOINT_SYSCTL_JENKS_CURVATURE, trackpoint_sysctl, "I", "Jenks curvature"); /* hw.psm.trackpoint.z_time */ sc->tpinfo.ztime = 0x26; SYSCTL_ADD_PROC(&sc->tpinfo.sysctl_ctx, SYSCTL_CHILDREN(sc->tpinfo.sysctl_tree), OID_AUTO, "z_time", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, TRACKPOINT_SYSCTL_Z_TIME, trackpoint_sysctl, "I", "Z time constant"); /* hw.psm.trackpoint.press_to_select */ sc->tpinfo.pts = 0x00; SYSCTL_ADD_PROC(&sc->tpinfo.sysctl_ctx, SYSCTL_CHILDREN(sc->tpinfo.sysctl_tree), 
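/*
 * Illustrative aside (editor's note, not part of the original change):
 * every node in this tree follows the same pattern: seed a default in
 * sc->tpinfo, then register a CTLTYPE_INT handler so the value can be
 * changed at runtime, e.g. from userland:
 *
 *	# sysctl hw.psm.trackpoint.sensitivity=128
 *
 * trackpoint_sysctl() range-checks the new value and pushes it to the
 * device via trackpoint_command() before caching it in the softc.
 */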
OID_AUTO, "press_to_select", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, TRACKPOINT_SYSCTL_PRESS_TO_SELECT, trackpoint_sysctl, "I", "Press to Select"); /* hw.psm.trackpoint.skip_backups */ sc->tpinfo.skipback = 0x00; SYSCTL_ADD_PROC(&sc->tpinfo.sysctl_ctx, SYSCTL_CHILDREN(sc->tpinfo.sysctl_tree), OID_AUTO, "skip_backups", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, sc, TRACKPOINT_SYSCTL_SKIP_BACKUPS, trackpoint_sysctl, "I", "Skip backups from drags"); } static void set_trackpoint_parameters(struct psm_softc *sc) { trackpoint_command(sc, 0x81, 0x4a, sc->tpinfo.sensitivity); trackpoint_command(sc, 0x81, 0x60, sc->tpinfo.uplateau); trackpoint_command(sc, 0x81, 0x4d, sc->tpinfo.inertia); trackpoint_command(sc, 0x81, 0x57, sc->tpinfo.reach); trackpoint_command(sc, 0x81, 0x58, sc->tpinfo.draghys); trackpoint_command(sc, 0x81, 0x59, sc->tpinfo.mindrag); trackpoint_command(sc, 0x81, 0x5a, sc->tpinfo.upthresh); trackpoint_command(sc, 0x81, 0x5c, sc->tpinfo.threshold); trackpoint_command(sc, 0x81, 0x5d, sc->tpinfo.jenks); trackpoint_command(sc, 0x81, 0x5e, sc->tpinfo.ztime); if (sc->tpinfo.pts == 0x01) trackpoint_command(sc, 0x47, 0x2c, 0x01); if (sc->tpinfo.skipback == 0x01) trackpoint_command(sc, 0x47, 0x2d, 0x08); } static int enable_trackpoint(struct psm_softc *sc, enum probearg arg) { KBDC kbdc = sc->kbdc; int id; /* * If called from enable_synaptics(), make sure that passthrough * mode is enabled so we can reach the trackpoint. * However, passthrough mode must be disabled before setting the * trackpoint parameters, as rackpoint_command() enables and disables * passthrough mode on its own. */ if (sc->synhw.capPassthrough) synaptics_passthrough_on(sc); if (send_aux_command(kbdc, 0xe1) != PSM_ACK || read_aux_data(kbdc) != 0x01) goto no_trackpoint; id = read_aux_data(kbdc); if (id < 0x01) goto no_trackpoint; if (arg == PROBE) sc->tphw = id; if (!trackpoint_support) goto no_trackpoint; if (sc->synhw.capPassthrough) synaptics_passthrough_off(sc); if (arg == PROBE) { trackpoint_sysctl_create_tree(sc); /* * Don't overwrite hwid and buttons when we are * a guest device. */ if (!sc->synhw.capPassthrough) { sc->hw.hwid = id; sc->hw.buttons = 3; } } set_trackpoint_parameters(sc); return (TRUE); no_trackpoint: if (sc->synhw.capPassthrough) synaptics_passthrough_off(sc); return (FALSE); } /* Interlink electronics VersaPad */ static int enable_versapad(struct psm_softc *sc, enum probearg arg) { KBDC kbdc = sc->kbdc; int data[3]; set_mouse_resolution(kbdc, PSMD_RES_MEDIUM_HIGH); /* set res. 2 */ set_mouse_sampling_rate(kbdc, 100); /* set rate 100 */ set_mouse_scaling(kbdc, 1); /* set scale 1:1 */ set_mouse_scaling(kbdc, 1); /* set scale 1:1 */ set_mouse_scaling(kbdc, 1); /* set scale 1:1 */ set_mouse_scaling(kbdc, 1); /* set scale 1:1 */ if (get_mouse_status(kbdc, data, 0, 3) < 3) /* get status */ return (FALSE); if (data[2] != 0xa || data[1] != 0 ) /* rate == 0xa && res. == 0 */ return (FALSE); set_mouse_scaling(kbdc, 1); /* set scale 1:1 */ return (TRUE); /* PS/2 absolute mode */ } /* Elantech Touchpad */ static int elantech_read_1(KBDC kbdc, int hwversion, int reg, int *val) { int res, readcmd, retidx; int resp[3]; readcmd = hwversion == 2 ? ELANTECH_REG_READ : ELANTECH_REG_RDWR; retidx = hwversion == 4 ? 
1 : 0; res = send_aux_command(kbdc, ELANTECH_CUSTOM_CMD) != PSM_ACK; res |= send_aux_command(kbdc, readcmd) != PSM_ACK; res |= send_aux_command(kbdc, ELANTECH_CUSTOM_CMD) != PSM_ACK; res |= send_aux_command(kbdc, reg) != PSM_ACK; res |= get_mouse_status(kbdc, resp, 0, 3) != 3; if (res == 0) *val = resp[retidx]; return (res); } static int elantech_write_1(KBDC kbdc, int hwversion, int reg, int val) { int res, writecmd; writecmd = hwversion == 2 ? ELANTECH_REG_WRITE : ELANTECH_REG_RDWR; res = send_aux_command(kbdc, ELANTECH_CUSTOM_CMD) != PSM_ACK; res |= send_aux_command(kbdc, writecmd) != PSM_ACK; res |= send_aux_command(kbdc, ELANTECH_CUSTOM_CMD) != PSM_ACK; res |= send_aux_command(kbdc, reg) != PSM_ACK; if (hwversion == 4) { res |= send_aux_command(kbdc, ELANTECH_CUSTOM_CMD) != PSM_ACK; res |= send_aux_command(kbdc, writecmd) != PSM_ACK; } res |= send_aux_command(kbdc, ELANTECH_CUSTOM_CMD) != PSM_ACK; res |= send_aux_command(kbdc, val) != PSM_ACK; res |= set_mouse_scaling(kbdc, 1) == 0; return (res); } static int elantech_cmd(KBDC kbdc, int hwversion, int cmd, int *resp) { int res; if (hwversion == 2) { res = set_mouse_scaling(kbdc, 1) == 0; res |= mouse_ext_command(kbdc, cmd) == 0; } else { res = send_aux_command(kbdc, ELANTECH_CUSTOM_CMD) != PSM_ACK; res |= send_aux_command(kbdc, cmd) != PSM_ACK; } res |= get_mouse_status(kbdc, resp, 0, 3) != 3; return (res); } static int elantech_init(KBDC kbdc, elantechhw_t *elanhw) { int i, val, res, hwversion, reg10; /* set absolute mode */ hwversion = elanhw->hwversion; reg10 = -1; switch (hwversion) { case 2: reg10 = elanhw->fwversion == 0x020030 ? 0x54 : 0xc4; res = elantech_write_1(kbdc, hwversion, 0x10, reg10); if (res) break; res = elantech_write_1(kbdc, hwversion, 0x11, 0x8A); break; case 3: reg10 = 0x0b; res = elantech_write_1(kbdc, hwversion, 0x10, reg10); break; case 4: res = elantech_write_1(kbdc, hwversion, 0x07, 0x01); break; default: res = 1; } /* Read back reg 0x10 to ensure hardware is ready. */ if (res == 0 && reg10 >= 0) { for (i = 0; i < 5; i++) { if (elantech_read_1(kbdc, hwversion, 0x10, &val) == 0) break; DELAY(2000); } if (i == 5) res = 1; } if (res) printf("couldn't set absolute mode\n"); return (res); } static void elantech_init_synaptics(struct psm_softc *sc) { /* Set capabilities required by the movement smoother */ sc->synhw.infoMajor = sc->elanhw.hwversion; sc->synhw.infoMinor = sc->elanhw.fwversion; sc->synhw.infoXupmm = sc->elanhw.dpmmx; sc->synhw.infoYupmm = sc->elanhw.dpmmy; sc->synhw.verticalScroll = 0; sc->synhw.nExtendedQueries = 4; sc->synhw.capExtended = 1; sc->synhw.capPassthrough = sc->elanhw.hastrackpoint; sc->synhw.capClickPad = sc->elanhw.isclickpad; sc->synhw.capMultiFinger = 1; if (sc->elanhw.issemimt) sc->synhw.capAdvancedGestures = 1; else sc->synhw.capReportsV = 1; sc->synhw.capPalmDetect = 1; sc->synhw.capPen = 0; sc->synhw.capReportsMax = 1; sc->synhw.maximumXCoord = sc->elanhw.sizex; sc->synhw.maximumYCoord = sc->elanhw.sizey; sc->synhw.capReportsMin = 1; sc->synhw.minimumXCoord = 0; sc->synhw.minimumYCoord = 0; if (sc->syninfo.sysctl_tree == NULL) { synaptics_sysctl_create_tree(sc, "elantech", "Elantech Touchpad"); /* * Adjust the synaptics smoother tunables * 1. Disable finger detection pressure threshold. Unlike * synaptics, we assume the finger is acting when a packet with * its X&Y arrives, not when pressure exceeds some threshold * 2. Disable unrelated features like margins and noisy areas * 3. Disable virtual scroll areas as a 2nd finger is preferable * 4.
For clickpads set bottom quarter as 42% - 16% - 42% sized * softbuttons * 5. Scale down divisors and movement lengths by a factor of 3 * where 3 is Synaptics to Elantech (~2200/800) dpi ratio */ /* Set reporting range to be equal to the touchpad size */ sc->syninfo.max_x = sc->elanhw.sizex; sc->syninfo.max_y = sc->elanhw.sizey; /* Disable finger detection pressure threshold */ sc->syninfo.min_pressure = 1; /* Adjust palm width to nearly match synaptics w=10 */ sc->syninfo.max_width = 7; /* Elans often report double & triple taps as a single event */ sc->syninfo.tap_min_queue = 1; /* Use full area of touchpad */ sc->syninfo.margin_top = 0; sc->syninfo.margin_right = 0; sc->syninfo.margin_bottom = 0; sc->syninfo.margin_left = 0; /* Disable noisy area */ sc->syninfo.na_top = 0; sc->syninfo.na_right = 0; sc->syninfo.na_bottom = 0; sc->syninfo.na_left = 0; /* Tune divisors and movement lengths */ sc->syninfo.weight_len_squared = 200; sc->syninfo.div_min = 3; sc->syninfo.div_max = 6; sc->syninfo.div_max_na = 10; sc->syninfo.div_len = 30; sc->syninfo.tap_max_delta = 25; /* Disable virtual scrolling areas and tune its divisors */ sc->syninfo.vscroll_hor_area = 0; sc->syninfo.vscroll_ver_area = 0; sc->syninfo.vscroll_min_delta = 15; sc->syninfo.vscroll_div_min = 30; sc->syninfo.vscroll_div_max = 50; /* Set bottom quarter as 42% - 16% - 42% sized softbuttons */ if (sc->elanhw.isclickpad) { sc->syninfo.softbuttons_y = sc->elanhw.sizey / 4; sc->syninfo.softbutton2_x = sc->elanhw.sizex * 11 / 25; sc->syninfo.softbutton3_x = sc->elanhw.sizex * 14 / 25; } } return; } static int enable_elantech(struct psm_softc *sc, enum probearg arg) { static const int ic2hw[] = /*IC: 0 1 2 3 4 5 6 7 8 9 a b c d e f */ { 0, 0, 2, 0, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0 }; static const int fw_sizes[][3] = { /* FW.vers MaxX MaxY */ { 0x020030, 1152, 768 }, { 0x020800, 1152, 768 }, { 0x020b00, 1152, 768 }, { 0x040215, 900, 500 }, { 0x040216, 819, 405 }, { 0x040219, 900, 500 }, }; elantechhw_t elanhw; int icversion, hwversion, xtr, i, id, resp[3], dpix, dpiy; KBDC kbdc = sc->kbdc; VLOG(3, (LOG_DEBUG, "elantech: BEGIN init\n")); set_mouse_scaling(kbdc, 1); set_mouse_scaling(kbdc, 1); set_mouse_scaling(kbdc, 1); if (get_mouse_status(kbdc, resp, 0, 3) != 3) return (FALSE); if (!ELANTECH_MAGIC(resp)) return (FALSE); /* Identify the Touchpad version. */ if (elantech_cmd(kbdc, 2, ELANTECH_FW_VERSION, resp)) return (FALSE); bzero(&elanhw, sizeof(elanhw)); elanhw.fwversion = (resp[0] << 16) | (resp[1] << 8) | resp[2]; icversion = resp[0] & 0x0f; hwversion = ic2hw[icversion]; if (verbose >= 2) printf("Elantech touchpad hardware v.%d firmware v.0x%06x\n", hwversion, elanhw.fwversion); if (ELANTECH_HW_IS_V1(elanhw.fwversion)) { printf (" Unsupported touchpad hardware (v1)\n"); return (FALSE); } if (hwversion == 0) { printf (" Unknown touchpad hardware (firmware v.0x%06x)\n", elanhw.fwversion); return (FALSE); } /* Get the Touchpad model information. */ elanhw.hwversion = hwversion; elanhw.issemimt = hwversion == 2; elanhw.isclickpad = (resp[1] & 0x10) != 0; elanhw.hascrc = (resp[1] & 0x40) != 0; elanhw.haspressure = elanhw.fwversion >= 0x020800; /* Read the capability bits.
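* As consumed just below: bit 7 of byte 0 advertises a pass-through * trackpoint, and bytes 1 and 2 carry the horizontal and vertical trace * counts (clamped to a minimum of 3). Purely for illustration, a * hypothetical response of { 0x85, 0x08, 0x0c } would describe a pad * with a trackpoint and 8 x 12 traces.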
*/ if (elantech_cmd(kbdc, hwversion, ELANTECH_CAPABILITIES, resp) != 0) { printf(" Failed to read capability bits\n"); return (FALSE); } elanhw.ntracesx = imax(resp[1], 3); elanhw.ntracesy = imax(resp[2], 3); elanhw.hastrackpoint = (resp[0] & 0x80) != 0; /* Get the touchpad resolution */ switch (hwversion) { case 4: if (elantech_cmd(kbdc, hwversion, ELANTECH_RESOLUTION, resp) == 0) { dpix = (resp[1] & 0x0f) * 10 + 790; dpiy = ((resp[1] & 0xf0) >> 4) * 10 + 790; elanhw.dpmmx = (dpix * 10 + 5) / 254; elanhw.dpmmy = (dpiy * 10 + 5) / 254; break; } /* FALLTHROUGH */ case 2: case 3: elanhw.dpmmx = elanhw.dpmmy = 32; /* 800 dpi */ break; } if (!elantech_support) return (FALSE); if (elantech_init(kbdc, &elanhw)) { printf("couldn't initialize elantech touchpad\n"); return (FALSE); } /* * Get the touchpad reporting range. * On HW v.3 touchpads it should be done after switching hardware * to real resolution mode (by setting bit 3 of reg10) */ elanhw.dptracex = elanhw.dptracey = 64; for (i = 0; i < nitems(fw_sizes); i++) { if (elanhw.fwversion == fw_sizes[i][0]) { elanhw.sizex = fw_sizes[i][1]; elanhw.sizey = fw_sizes[i][2]; goto found; } } if (elantech_cmd(kbdc, hwversion, ELANTECH_FW_ID, resp) != 0) { printf(" Failed to read touchpad size\n"); elanhw.sizex = 10000; /* Arbitrary high values to */ elanhw.sizey = 10000; /* prevent clipping in smoother */ } else if (hwversion == 2) { if ((elanhw.fwversion >> 16) == 0x14 && (resp[1] & 0x10) && !elantech_cmd(kbdc, hwversion, ELANTECH_SAMPLE, resp)) { elanhw.dptracex = resp[1] / 2; elanhw.dptracey = resp[2] / 2; } xtr = ((elanhw.fwversion >> 8) == 0x0208) ? 1 : 2; elanhw.sizex = (elanhw.ntracesx - xtr) * elanhw.dptracex; elanhw.sizey = (elanhw.ntracesy - xtr) * elanhw.dptracey; } else { elanhw.sizex = (resp[0] & 0x0f) << 8 | resp[1]; elanhw.sizey = (resp[0] & 0xf0) << 4 | resp[2]; xtr = (elanhw.sizex % (elanhw.ntracesx - 2) == 0) ? 2 : 1; elanhw.dptracex = elanhw.sizex / (elanhw.ntracesx - xtr); elanhw.dptracey = elanhw.sizey / (elanhw.ntracesy - xtr); } found: if (verbose >= 2) { printf(" Model information:\n"); printf(" MaxX: %d\n", elanhw.sizex); printf(" MaxY: %d\n", elanhw.sizey); printf(" DpmmX: %d\n", elanhw.dpmmx); printf(" DpmmY: %d\n", elanhw.dpmmy); printf(" TracesX: %d\n", elanhw.ntracesx); printf(" TracesY: %d\n", elanhw.ntracesy); printf(" DptraceX: %d\n", elanhw.dptracex); printf(" DptraceY: %d\n", elanhw.dptracey); printf(" SemiMT: %d\n", elanhw.issemimt); printf(" Clickpad: %d\n", elanhw.isclickpad); printf(" Trackpoint: %d\n", elanhw.hastrackpoint); printf(" CRC: %d\n", elanhw.hascrc); printf(" Pressure: %d\n", elanhw.haspressure); } VLOG(3, (LOG_DEBUG, "elantech: END init\n")); if (arg == PROBE) { sc->elanhw = elanhw; sc->hw.buttons = 3; /* Initialize synaptics movement smoother */ elantech_init_synaptics(sc); for (id = 0; id < ELANTECH_MAX_FINGERS; id++) PSM_FINGER_RESET(sc->elanaction.fingers[id]); } return (TRUE); } /* * Return true if (start + (secs.usecs)) is earlier than 'now', i.e. if * at least secs.usecs have elapsed since 'start'. * Now may be NULL and the function will fetch the current time from * getmicrouptime(), or a cached 'now' can be passed in. * All values should be numbers derived from getmicrouptime(). */ static int timeelapsed(start, secs, usecs, now) const struct timeval *start, *now; int secs, usecs; { struct timeval snow, tv; /* if there is no 'now' passed in, then get it as a convenience.
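* Passing a cached 'now' instead avoids repeated getmicrouptime() calls * when several intervals are checked in a row. Illustrative values only: * timeelapsed(&start, 0, 500000, NULL) becomes true once at least half a * second has elapsed since 'start'.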
*/ if (now == NULL) { getmicrouptime(&snow); now = &snow; } tv.tv_sec = secs; tv.tv_usec = usecs; timevaladd(&tv, start); return (timevalcmp(&tv, now, <)); } static int psmresume(device_t dev) { struct psm_softc *sc = device_get_softc(dev); int unit = device_get_unit(dev); int err; VLOG(2, (LOG_NOTICE, "psm%d: system resume hook called.\n", unit)); if ((sc->config & (PSM_CONFIG_HOOKRESUME | PSM_CONFIG_INITAFTERSUSPEND)) == 0) return (0); err = reinitialize(sc, sc->config & PSM_CONFIG_INITAFTERSUSPEND); if ((sc->state & PSM_ASLP) && !(sc->state & PSM_VALID)) { /* * Release the blocked process; it must be notified that * the device cannot be accessed anymore. */ sc->state &= ~PSM_ASLP; wakeup(sc); } VLOG(2, (LOG_DEBUG, "psm%d: system resume hook exiting.\n", unit)); return (err); } DRIVER_MODULE(psm, atkbdc, psm_driver, psm_devclass, 0, 0); #ifdef EVDEV_SUPPORT MODULE_DEPEND(psm, evdev, 1, 1, 1); #endif #ifdef DEV_ISA /* * This sucks up assignments from PNPBIOS and ACPI. */ /* * When the PS/2 mouse device is reported by ACPI or PnP BIOS, it may * appear BEFORE the AT keyboard controller. As the PS/2 mouse device * can be probed and attached only after the AT keyboard controller is * attached, we shall quietly reserve the IRQ resource for later use. * If the PS/2 mouse device is reported to us AFTER the keyboard controller, * copy the IRQ resource to the PS/2 mouse device instance hanging * under the keyboard controller, then probe and attach it. */ static devclass_t psmcpnp_devclass; static device_probe_t psmcpnp_probe; static device_attach_t psmcpnp_attach; static device_method_t psmcpnp_methods[] = { DEVMETHOD(device_probe, psmcpnp_probe), DEVMETHOD(device_attach, psmcpnp_attach), { 0, 0 } }; static driver_t psmcpnp_driver = { PSMCPNP_DRIVER_NAME, psmcpnp_methods, sizeof(struct psmcpnp_softc), }; static struct isa_pnp_id psmcpnp_ids[] = { { 0x030fd041, "PS/2 mouse port" }, /* PNP0F03 */ { 0x0e0fd041, "PS/2 mouse port" }, /* PNP0F0E */ { 0x120fd041, "PS/2 mouse port" }, /* PNP0F12 */ { 0x130fd041, "PS/2 mouse port" }, /* PNP0F13 */ { 0x1303d041, "PS/2 port" }, /* PNP0313, XXX */ { 0x02002e4f, "Dell PS/2 mouse port" }, /* Lat. X200, Dell */ { 0x0002a906, "ALPS Glide Point" }, /* ALPS Glide Point */ { 0x80374d24, "IBM PS/2 mouse port" }, /* IBM3780, ThinkPad */ { 0x81374d24, "IBM PS/2 mouse port" }, /* IBM3781, ThinkPad */ { 0x0190d94d, "SONY VAIO PS/2 mouse port"}, /* SNY9001, Vaio */ { 0x0290d94d, "SONY VAIO PS/2 mouse port"}, /* SNY9002, Vaio */ { 0x0390d94d, "SONY VAIO PS/2 mouse port"}, /* SNY9003, Vaio */ { 0x0490d94d, "SONY VAIO PS/2 mouse port"}, /* SNY9004, Vaio */ { 0 } }; /* _HID list for quirk detection. 
Any device below has _CID from psmcpnp_ids */ static struct isa_pnp_id forcepad_ids[] = { { 0x0d302e4f, "HP PS/2 forcepad port" }, /* SYN300D, EB 1040 */ { 0x14302e4f, "HP PS/2 forcepad port" }, /* SYN3014, EB 1040 */ { 0 } }; -/* List of HW v8.1 synaptics touchpads erroneously detected as HW v2.0 */ -static struct isa_pnp_id hpsyn81_ids[] = { - { 0x9e012e4f, "HP PS/2 trackpad port" }, /* SYN019E, EB 9470 */ - { 0 } -}; - static int create_a_copy(device_t atkbdc, device_t me) { device_t psm; u_long irq; /* find the PS/2 mouse device instance under the keyboard controller */ psm = device_find_child(atkbdc, PSM_DRIVER_NAME, device_get_unit(atkbdc)); if (psm == NULL) return (ENXIO); if (device_get_state(psm) != DS_NOTPRESENT) return (0); /* move our resource to the found device */ irq = bus_get_resource_start(me, SYS_RES_IRQ, 0); bus_delete_resource(me, SYS_RES_IRQ, 0); bus_set_resource(psm, SYS_RES_IRQ, KBDC_RID_AUX, irq, 1); /* ...then probe and attach it */ return (device_probe_and_attach(psm)); } static int psmcpnp_probe(device_t dev) { struct psmcpnp_softc *sc = device_get_softc(dev); struct resource *res; u_long irq; int rid; if (ISA_PNP_PROBE(device_get_parent(dev), dev, forcepad_ids) == 0) sc->type = PSMCPNP_FORCEPAD; - else if(ISA_PNP_PROBE(device_get_parent(dev), dev, hpsyn81_ids) == 0) - sc->type = PSMCPNP_HPSYN81; else if (ISA_PNP_PROBE(device_get_parent(dev), dev, psmcpnp_ids) == 0) sc->type = PSMCPNP_GENERIC; else return (ENXIO); /* * The PnP BIOS and ACPI are supposed to assign an IRQ (12) * to the PS/2 mouse device node. But, some buggy PnP BIOS * declares the PS/2 mouse device node without an IRQ resource! * If this happens, we shall refer to device hints. * If we still don't find it there, use a hardcoded value... XXX */ rid = 0; irq = bus_get_resource_start(dev, SYS_RES_IRQ, rid); if (irq <= 0) { if (resource_long_value(PSM_DRIVER_NAME, device_get_unit(dev),"irq", &irq) != 0) irq = 12; /* XXX */ device_printf(dev, "irq resource info is missing; " "assuming irq %ld\n", irq); bus_set_resource(dev, SYS_RES_IRQ, rid, irq, 1); } res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 0); bus_release_resource(dev, SYS_RES_IRQ, rid, res); /* keep quiet */ if (!bootverbose) device_quiet(dev); return ((res == NULL) ? ENXIO : 0); } static int psmcpnp_attach(device_t dev) { device_t atkbdc; /* find the keyboard controller, which may be on acpi* or isa* bus */ atkbdc = devclass_get_device(devclass_find(ATKBDC_DRIVER_NAME), device_get_unit(dev)); if ((atkbdc != NULL) && (device_get_state(atkbdc) == DS_ATTACHED)) create_a_copy(atkbdc, dev); return (0); } DRIVER_MODULE(psmcpnp, isa, psmcpnp_driver, psmcpnp_devclass, 0, 0); DRIVER_MODULE(psmcpnp, acpi, psmcpnp_driver, psmcpnp_devclass, 0, 0); ISA_PNP_INFO(psmcpnp_ids); #endif /* DEV_ISA */ Index: projects/clang700-import/sys/dev/sfxge/common/ef10_ev.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/ef10_ev.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/ef10_ev.c (revision 340918) @@ -1,1416 +1,1418 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_MON_STATS #include "mcdi_mon.h" #endif #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD #if EFSYS_OPT_QSTATS #define EFX_EV_QSTAT_INCR(_eep, _stat) \ do { \ (_eep)->ee_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_EV_QSTAT_INCR(_eep, _stat) #endif /* * A non-interrupting event queue requires an interrupting event queue to * refer to for wake-up events, even if wake-ups are never used. * It could even be a non-allocated event queue. */ #define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0) static __checkReturn boolean_t ef10_ev_rx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t ef10_ev_tx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t ef10_ev_driver( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t ef10_ev_drv_gen( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn boolean_t ef10_ev_mcdi( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); static __checkReturn efx_rc_t efx_mcdi_set_evq_tmr( __in efx_nic_t *enp, __in uint32_t instance, __in uint32_t mode, __in uint32_t timer_ns) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN, MC_CMD_SET_EVQ_TMR_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_EVQ_TMR; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN; MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance); MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns); MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns); MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) { rc = EMSGSIZE; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static
__checkReturn efx_rc_t efx_mcdi_init_evq( __in efx_nic_t *enp, __in unsigned int instance, __in efsys_mem_t *esmp, __in size_t nevs, __in uint32_t irq, __in uint32_t us, __in uint32_t flags, __in boolean_t low_latency) { efx_mcdi_req_t req; uint8_t payload[ MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)), MC_CMD_INIT_EVQ_OUT_LEN)]; efx_qword_t *dma_addr; uint64_t addr; int npages; int i; boolean_t interrupting; int ev_cut_through; efx_rc_t rc; npages = EFX_EVQ_NBUFS(nevs); if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq); interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); /* * On Huntington RX and TX event batching can only be requested together * (even if the datapath firmware doesn't actually support RX * batching). If event cut through is enabled no RX batching will occur. * * So always enable RX and TX event batching, and enable event cut * through if we want low latency operation. */ switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { case EFX_EVQ_FLAGS_TYPE_AUTO: ev_cut_through = low_latency ? 1 : 0; break; case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: ev_cut_through = 0; break; case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: ev_cut_through = 1; break; default: rc = EINVAL; goto fail2; } MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS, INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting, INIT_EVQ_IN_FLAG_RPTR_DOS, 0, INIT_EVQ_IN_FLAG_INT_ARMD, 0, INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through, INIT_EVQ_IN_FLAG_RX_MERGE, 1, INIT_EVQ_IN_FLAG_TX_MERGE, 1); /* If the value is zero then disable the timer */ if (us == 0) { MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0); } else { unsigned int ticks; if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) goto fail3; MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE, MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks); } MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE, MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail4; } if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) { rc = EMSGSIZE; goto fail5; } /* NOTE: ignore the returned IRQ param as firmware does not set it. 
*/ return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_init_evq_v2( __in efx_nic_t *enp, __in unsigned int instance, __in efsys_mem_t *esmp, __in size_t nevs, __in uint32_t irq, __in uint32_t us, __in uint32_t flags) { efx_mcdi_req_t req; uint8_t payload[ MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)), MC_CMD_INIT_EVQ_V2_OUT_LEN)]; boolean_t interrupting; unsigned int evq_type; efx_qword_t *dma_addr; uint64_t addr; int npages; int i; efx_rc_t rc; npages = EFX_EVQ_NBUFS(nevs); if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq); interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { case EFX_EVQ_FLAGS_TYPE_AUTO: evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO; break; case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT; break; case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY; break; default: rc = EINVAL; goto fail2; } MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS, INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting, INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0, INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0, INIT_EVQ_V2_IN_FLAG_TYPE, evq_type); /* If the value is zero then disable the timer */ if (us == 0) { MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0); } else { unsigned int ticks; if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) goto fail3; MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks); } MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE, MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS); MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail4; } if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) { rc = EMSGSIZE; goto fail5; } /* NOTE: ignore the returned IRQ param as firmware does not set it. 
*/ EFSYS_PROBE1(mcdi_evq_flags, uint32_t, MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS)); return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_fini_evq( __in efx_nic_t *enp, __in uint32_t instance) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN, MC_CMD_FINI_EVQ_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FINI_EVQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN; MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: /* * EALREADY is not an error, but indicates that the MC has rebooted and * that the EVQ has already been destroyed. */ if (rc != EALREADY) EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_ev_init( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) return (0); } void ef10_ev_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } __checkReturn efx_rc_t ef10_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, __in uint32_t us, __in uint32_t flags, __in efx_evq_t *eep) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t irq; efx_rc_t rc; _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS)); EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS)); - if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) { + if (!ISP2(ndescs) || + (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_evq_limit) { rc = EINVAL; goto fail2; } if (us > encp->enc_evq_timer_max_us) { rc = EINVAL; goto fail3; } /* Set up the handler table */ eep->ee_rx = ef10_ev_rx; eep->ee_tx = ef10_ev_tx; eep->ee_driver = ef10_ev_driver; eep->ee_drv_gen = ef10_ev_drv_gen; eep->ee_mcdi = ef10_ev_mcdi; /* Set up the event queue */ /* INIT_EVQ expects function-relative vector number */ if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) { irq = index; } else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) { irq = index; flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) | EFX_EVQ_FLAGS_NOTIFY_INTERRUPT; } else { irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX; } /* * Interrupts may be raised for events immediately after the queue is * created. See bug58606. */ if (encp->enc_init_evq_v2_supported) { /* * On Medford the low latency license is required to enable RX * and event cut through and to disable RX batching. If event * queue type in flags is auto, we let the firmware decide the * settings to use. If the adapter has a low latency license, * it will choose the best settings for low latency, otherwise * it will choose the best settings for throughput. */ - rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags); + rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us, + flags); if (rc != 0) goto fail4; } else { /* * On Huntington we need to specify the settings to use. * If event queue type in flags is auto, we favour throughput * if the adapter is running virtualization supporting firmware * (i.e. the full featured firmware variant) * and latency otherwise. The Ethernet Virtual Bridging * capability is used to make this decision. 
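* In other words, enc_datapath_cap_evb set implies the full featured * firmware variant, so low_latency just below is computed as its negation.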
(Note though that * the low latency firmware variant is also best for * throughput and corresponding type should be specified * to choose it.) */ boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1; - rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags, + rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags, low_latency); if (rc != 0) goto fail5; } return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_ev_qdestroy( __in efx_evq_t *eep) { efx_nic_t *enp = eep->ee_enp; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); - (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index); + (void) efx_mcdi_fini_evq(enp, eep->ee_index); } __checkReturn efx_rc_t ef10_ev_qprime( __in efx_evq_t *eep, __in unsigned int count) { efx_nic_t *enp = eep->ee_enp; uint32_t rptr; efx_dword_t dword; rptr = count & eep->ee_mask; if (enp->en_nic_cfg.enc_bug35388_workaround) { EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS > (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS < (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_POPULATE_DWORD_2(dword, ERF_DD_EVQ_IND_RPTR_FLAGS, EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, ERF_DD_EVQ_IND_RPTR, (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH)); EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); EFX_POPULATE_DWORD_2(dword, ERF_DD_EVQ_IND_RPTR_FLAGS, EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, ERF_DD_EVQ_IND_RPTR, rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); } else { EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr); EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, &dword, B_FALSE); } return (0); } static __checkReturn efx_rc_t efx_mcdi_driver_event( __in efx_nic_t *enp, __in uint32_t evq, __in efx_qword_t data) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN, MC_CMD_DRIVER_EVENT_OUT_LEN)]; efx_rc_t rc; req.emr_cmd = MC_CMD_DRIVER_EVENT; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN; MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq); MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO, EFX_QWORD_FIELD(data, EFX_DWORD_0)); MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI, EFX_QWORD_FIELD(data, EFX_DWORD_1)); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_ev_qpost( __in efx_evq_t *eep, __in uint16_t data) { efx_nic_t *enp = eep->ee_enp; efx_qword_t event; EFX_POPULATE_QWORD_3(event, ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV, ESF_DZ_DRV_SUB_CODE, 0, ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data); (void) efx_mcdi_driver_event(enp, eep->ee_index, event); } __checkReturn efx_rc_t ef10_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us) { efx_nic_t *enp = eep->ee_enp; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_dword_t dword; uint32_t mode; efx_rc_t rc; /* Check that hardware and MCDI use the same timer MODE values */ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS); EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START); EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START); EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF == MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF); 
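/* * Illustrative usage, not part of this change: drivers normally reach * this code via the generic efx_ev_qmoderate() wrapper, e.g. a call of * efx_ev_qmoderate(eep, 30) requests a ~30us interrupt holdoff, while * us == 0 selects FFE_CZ_TIMER_MODE_DIS below and disables moderation. */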
if (us > encp->enc_evq_timer_max_us) { rc = EINVAL; goto fail1; } /* If the value is zero then disable the timer */ if (us == 0) { mode = FFE_CZ_TIMER_MODE_DIS; } else { mode = FFE_CZ_TIMER_MODE_INT_HLDOFF; } if (encp->enc_bug61265_workaround) { uint32_t ns = us * 1000; rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns); if (rc != 0) goto fail2; } else { unsigned int ticks; if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) goto fail3; if (encp->enc_bug35388_workaround) { EFX_POPULATE_DWORD_3(dword, ERF_DD_EVQ_IND_TIMER_FLAGS, EFE_DD_EVQ_IND_TIMER_FLAGS, ERF_DD_EVQ_IND_TIMER_MODE, mode, ERF_DD_EVQ_IND_TIMER_VAL, ticks); EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, 0); } else { EFX_POPULATE_DWORD_2(dword, ERF_DZ_TC_TIMER_MODE, mode, ERF_DZ_TC_TIMER_VAL, ticks); EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG, eep->ee_index, &dword, 0); } } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_QSTATS void ef10_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) { unsigned int id; for (id = 0; id < EV_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, eep->ee_stat[id]); eep->ee_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ #if EFSYS_OPT_RX_PACKED_STREAM static __checkReturn boolean_t ef10_ev_rx_packed_stream( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t label; uint32_t pkt_count_lbits; uint16_t flags; boolean_t should_abort; efx_evq_rxq_state_t *eersp; unsigned int pkt_count; unsigned int current_id; boolean_t new_buffer; pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE); flags = 0; eersp = &eep->ee_rxq_state[label]; /* * RX_DSC_PTR_LBITS has least significant bits of the global * (not per-buffer) packet counter. It is guaranteed that * maximum number of completed packets fits in lbits-mask. * So, modulo lbits-mask arithmetic should be used to calculate * packet counter increment. 
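* Worked example (a 12-bit field width is assumed purely for * illustration): with a stored count of 0xffe and an event carrying * lbits 0x005, (0x005 - 0xffe) & 0xfff == 0x007, i.e. seven packets * completed across the counter wrap.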
*/ pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) & EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); eersp->eers_rx_stream_npackets += pkt_count; if (new_buffer) { flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER; eersp->eers_rx_packed_stream_credits++; eersp->eers_rx_read_ptr++; } current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask; /* Check for errors that invalidate checksum and L3/L4 fields */ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) { /* RX frame truncated (error flag is misnamed) */ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); flags |= EFX_DISCARD; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) { /* Bad Ethernet frame CRC */ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); flags |= EFX_DISCARD; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) { flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); deliver: /* If we're not discarding the packet then it is ok */ if (~flags & EFX_DISCARD) EFX_EV_QSTAT_INCR(eep, EV_RX_OK); EFSYS_ASSERT(eecp->eec_rx_ps != NULL); should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count, flags); return (should_abort); } #endif /* EFSYS_OPT_RX_PACKED_STREAM */ static __checkReturn boolean_t ef10_ev_rx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; uint32_t size; uint32_t label; uint32_t mac_class; uint32_t eth_tag_class; uint32_t l3_class; uint32_t l4_class; uint32_t next_read_lbits; uint16_t flags; boolean_t cont; boolean_t should_abort; efx_evq_rxq_state_t *eersp; unsigned int desc_count; unsigned int last_used_id; EFX_EV_QSTAT_INCR(eep, EV_RX); /* Discard events after RXQ/TXQ errors */ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR)) return (B_FALSE); /* Basic packet information */ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); eersp = &eep->ee_rxq_state[label]; #if EFSYS_OPT_RX_PACKED_STREAM /* * Packed stream events are very different, * so handle them separately */ if (eersp->eers_rx_packed_stream) return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg)); #endif size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES); next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS); mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS); l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS); l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS); cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) { /* Drop this event */ return (B_FALSE); } flags = 0; if (cont != 0) { /* * This may be part of a scattered frame, or it may be a * truncated frame if scatter is disabled on this RXQ. * Overlength frames can be received if e.g. a VF is configured * for 1500 MTU but connected to a port set to 9000 MTU * (see bug56567). * FIXME: There is not yet any driver that supports scatter on * Huntington. Scatter support is required for OSX. */ flags |= EFX_PKT_CONT; } if (mac_class == ESE_DZ_MAC_CLASS_UCAST) flags |= EFX_PKT_UNICAST; /* Increment the count of descriptors read */ desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) & EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); eersp->eers_rx_read_ptr += desc_count; /* * FIXME: add error checking to make sure this is a batched event.
* This could also be an aborted scatter, see Bug36629. */ if (desc_count > 1) { EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH); flags |= EFX_PKT_PREFIX_LEN; } /* Calculate the index of the last descriptor consumed */ last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask; /* Check for errors that invalidate checksum and L3/L4 fields */ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) { /* RX frame truncated (error flag is misnamed) */ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); flags |= EFX_DISCARD; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) { /* Bad Ethernet frame CRC */ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); flags |= EFX_DISCARD; goto deliver; } if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) { /* * Hardware parse failed, due to malformed headers * or headers that are too long for the parser. * Headers and checksums must be validated by the host. */ /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */ goto deliver; } if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) || (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) { flags |= EFX_PKT_VLAN_TAGGED; } switch (l3_class) { case ESE_DZ_L3_CLASS_IP4: case ESE_DZ_L3_CLASS_IP4_FRAG: flags |= EFX_PKT_IPV4; if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) { EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); } else { flags |= EFX_CKSUM_IPV4; } if (l4_class == ESE_DZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); flags |= EFX_PKT_TCP; } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); flags |= EFX_PKT_UDP; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4); } break; case ESE_DZ_L3_CLASS_IP6: case ESE_DZ_L3_CLASS_IP6_FRAG: flags |= EFX_PKT_IPV6; if (l4_class == ESE_DZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); flags |= EFX_PKT_TCP; } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); flags |= EFX_PKT_UDP; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6); } break; default: EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP); break; } if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) { if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); } else { flags |= EFX_CKSUM_TCPUDP; } } deliver: /* If we're not discarding the packet then it is ok */ if (~flags & EFX_DISCARD) EFX_EV_QSTAT_INCR(eep, EV_RX_OK); EFSYS_ASSERT(eecp->eec_rx != NULL); should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags); return (should_abort); } static __checkReturn boolean_t ef10_ev_tx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; uint32_t id; uint32_t label; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_TX); /* Discard events after RXQ/TXQ errors */ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR)) return (B_FALSE); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) { /* Drop this event */ return (B_FALSE); } /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX); label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL); EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); EFSYS_ASSERT(eecp->eec_tx != NULL); should_abort = eecp->eec_tx(arg, label, id); return (should_abort); } static __checkReturn boolean_t ef10_ev_driver( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { unsigned int code; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRIVER); should_abort = B_FALSE; code = 
EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE); switch (code) { case ESE_DZ_DRV_TIMER_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID); EFSYS_ASSERT(eecp->eec_timer != NULL); should_abort = eecp->eec_timer(arg, id); break; } case ESE_DZ_DRV_WAKE_UP_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID); EFSYS_ASSERT(eecp->eec_wake_up != NULL); should_abort = eecp->eec_wake_up(arg, id); break; } case ESE_DZ_DRV_START_UP_EV: EFSYS_ASSERT(eecp->eec_initialized != NULL); should_abort = eecp->eec_initialized(arg); break; default: EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); break; } return (should_abort); } static __checkReturn boolean_t ef10_ev_drv_gen( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t data; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN); should_abort = B_FALSE; data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0); if (data >= ((uint32_t)1 << 16)) { EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); return (B_TRUE); } EFSYS_ASSERT(eecp->eec_software != NULL); should_abort = eecp->eec_software(arg, (uint16_t)data); return (should_abort); } static __checkReturn boolean_t ef10_ev_mcdi( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; unsigned int code; boolean_t should_abort = B_FALSE; EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE); code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE); switch (code) { case MCDI_EVENT_CODE_BADSSERT: efx_mcdi_ev_death(enp, EINTR); break; case MCDI_EVENT_CODE_CMDDONE: efx_mcdi_ev_cpl(enp, MCDI_EV_FIELD(eqp, CMDDONE_SEQ), MCDI_EV_FIELD(eqp, CMDDONE_DATALEN), MCDI_EV_FIELD(eqp, CMDDONE_ERRNO)); break; #if EFSYS_OPT_MCDI_PROXY_AUTH case MCDI_EVENT_CODE_PROXY_RESPONSE: /* * This event notifies a function that an authorization request * has been processed. If the request was authorized then the * function can now re-send the original MCDI request. * See SF-113652-SW "SR-IOV Proxied Network Access Control". */ efx_mcdi_ev_proxy_response(enp, MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE), MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC)); break; #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ case MCDI_EVENT_CODE_LINKCHANGE: { efx_link_mode_t link_mode; ef10_phy_link_ev(enp, eqp, &link_mode); should_abort = eecp->eec_link_change(arg, link_mode); break; } case MCDI_EVENT_CODE_SENSOREVT: { #if EFSYS_OPT_MON_STATS efx_mon_stat_t id; efx_mon_stat_value_t value; efx_rc_t rc; /* Decode monitor stat for MCDI sensor (if supported) */ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) { /* Report monitor stat change */ should_abort = eecp->eec_monitor(arg, id, value); } else if (rc == ENOTSUP) { should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_SENSOREVT, MCDI_EV_FIELD(eqp, DATA)); } else { EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ } #endif break; } case MCDI_EVENT_CODE_SCHEDERR: /* Informational only */ break; case MCDI_EVENT_CODE_REBOOT: /* Falcon/Siena only (should not be seen with Huntington). */ efx_mcdi_ev_death(enp, EIO); break; case MCDI_EVENT_CODE_MC_REBOOT: /* MC_REBOOT event is used for Huntington (EF10) and later.
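* As in the Falcon/Siena case above, the errno is handed to * efx_mcdi_ev_death(), which shuts down MCDI processing and should * fail any request still in flight with that error.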
*/ efx_mcdi_ev_death(enp, EIO); break; case MCDI_EVENT_CODE_MAC_STATS_DMA: #if EFSYS_OPT_MAC_STATS if (eecp->eec_mac_stats != NULL) { eecp->eec_mac_stats(arg, MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); } #endif break; case MCDI_EVENT_CODE_FWALERT: { uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_FWALERT_SRAM, MCDI_EV_FIELD(eqp, FWALERT_DATA)); else should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_FWALERT, MCDI_EV_FIELD(eqp, DATA)); break; } case MCDI_EVENT_CODE_TX_ERR: { /* * After a TXQ error is detected, firmware sends a TX_ERR event. * This may be followed by TX completions (which we discard), * and then finally by a TX_FLUSH event. Firmware destroys the * TXQ automatically after sending the TX_FLUSH event. */ enp->en_reset_flags |= EFX_RESET_TXQ_ERR; EFSYS_PROBE2(tx_descq_err, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); /* Inform the driver that a reset is required. */ eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR, MCDI_EV_FIELD(eqp, TX_ERR_DATA)); break; } case MCDI_EVENT_CODE_TX_FLUSH: { uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ); /* * EF10 firmware sends two TX_FLUSH events: one to the txq's * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set). * We want to wait for all completions, so ignore the events * with TX_FLUSH_TO_DRIVER. */ if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) { should_abort = B_FALSE; break; } EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); should_abort = eecp->eec_txq_flush_done(arg, txq_index); break; } case MCDI_EVENT_CODE_RX_ERR: { /* * After an RXQ error is detected, firmware sends an RX_ERR * event. This may be followed by RX events (which we discard), * and then finally by an RX_FLUSH event. Firmware destroys the * RXQ automatically after sending the RX_FLUSH event. */ enp->en_reset_flags |= EFX_RESET_RXQ_ERR; EFSYS_PROBE2(rx_descq_err, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); /* Inform the driver that a reset is required. */ eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR, MCDI_EV_FIELD(eqp, RX_ERR_DATA)); break; } case MCDI_EVENT_CODE_RX_FLUSH: { uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ); /* * EF10 firmware sends two RX_FLUSH events: one to the rxq's * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set). * We want to wait for all completions, so ignore the events * with RX_FLUSH_TO_DRIVER. 
*/ if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) { should_abort = B_FALSE; break; } EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE); EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); should_abort = eecp->eec_rxq_flush_done(arg, rxq_index); break; } default: EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); break; } return (should_abort); } void ef10_ev_rxlabel_init( __in efx_evq_t *eep, __in efx_rxq_t *erp, __in unsigned int label, __in efx_rxq_type_t type) { efx_evq_rxq_state_t *eersp; - boolean_t packed_stream = (type >= EFX_RXQ_TYPE_PACKED_STREAM_1M) && - (type <= EFX_RXQ_TYPE_PACKED_STREAM_64K); +#if EFSYS_OPT_RX_PACKED_STREAM + boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM); +#endif + _NOTE(ARGUNUSED(type)) EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); eersp = &eep->ee_rxq_state[label]; EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0); #if EFSYS_OPT_RX_PACKED_STREAM /* * For packed stream modes, the very first event will * have a new buffer flag set, so the read pointer will be * incremented, yielding the correct pointer. That results in * simpler code than trying to detect a start-of-the-world * condition in the event handler. */ eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0; #else eersp->eers_rx_read_ptr = 0; #endif eersp->eers_rx_mask = erp->er_mask; #if EFSYS_OPT_RX_PACKED_STREAM eersp->eers_rx_stream_npackets = 0; eersp->eers_rx_packed_stream = packed_stream; if (packed_stream) { eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) / EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT, EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE); EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0); /* * A single credit is allocated to the queue when it is started. * It is immediately spent by the first packet, which has the * NEW BUFFER flag set; it must still be taken into account so * as not to wrap around the maximum number of credits * accidentally. */ eersp->eers_rx_packed_stream_credits--; EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=, EFX_RX_PACKED_STREAM_MAX_CREDITS); } -#else - EFSYS_ASSERT(!packed_stream); #endif } void ef10_ev_rxlabel_fini( __in efx_evq_t *eep, __in unsigned int label) { efx_evq_rxq_state_t *eersp; EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state)); eersp = &eep->ee_rxq_state[label]; EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0); eersp->eers_rx_read_ptr = 0; eersp->eers_rx_mask = 0; #if EFSYS_OPT_RX_PACKED_STREAM eersp->eers_rx_stream_npackets = 0; eersp->eers_rx_packed_stream = B_FALSE; eersp->eers_rx_packed_stream_credits = 0; #endif } #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: projects/clang700-import/sys/dev/sfxge/common/ef10_filter.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/ef10_filter.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/ef10_filter.c (revision 340918) @@ -1,1666 +1,1666 @@ /*- * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2.
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD #if EFSYS_OPT_FILTER #define EFE_SPEC(eftp, index) ((eftp)->eft_entry[(index)].efe_spec) static efx_filter_spec_t * ef10_filter_entry_spec( __in const ef10_filter_table_t *eftp, __in unsigned int index) { return ((efx_filter_spec_t *)(EFE_SPEC(eftp, index) & ~(uintptr_t)EFX_EF10_FILTER_FLAGS)); } static boolean_t ef10_filter_entry_is_busy( __in const ef10_filter_table_t *eftp, __in unsigned int index) { if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_BUSY) return (B_TRUE); else return (B_FALSE); } static boolean_t ef10_filter_entry_is_auto_old( __in const ef10_filter_table_t *eftp, __in unsigned int index) { if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_AUTO_OLD) return (B_TRUE); else return (B_FALSE); } static void ef10_filter_set_entry( __inout ef10_filter_table_t *eftp, __in unsigned int index, __in_opt const efx_filter_spec_t *efsp) { EFE_SPEC(eftp, index) = (uintptr_t)efsp; } static void ef10_filter_set_entry_busy( __inout ef10_filter_table_t *eftp, __in unsigned int index) { EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_BUSY; } static void ef10_filter_set_entry_not_busy( __inout ef10_filter_table_t *eftp, __in unsigned int index) { EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_BUSY; } static void ef10_filter_set_entry_auto_old( __inout ef10_filter_table_t *eftp, __in unsigned int index) { EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL); EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD; } static void ef10_filter_set_entry_not_auto_old( __inout ef10_filter_table_t *eftp, __in unsigned int index) { EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD; EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL); } __checkReturn efx_rc_t ef10_filter_init( __in efx_nic_t *enp) { efx_rc_t rc; ef10_filter_table_t *eftp; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); #define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match)) EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC == 
MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_UNKNOWN_MCAST_DST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST)); EFX_STATIC_ASSERT((uint32_t)EFX_FILTER_MATCH_UNKNOWN_UCAST_DST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST)); #undef MATCH_MASK EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp); if (!eftp) { rc = ENOMEM; goto fail1; } enp->en_filter.ef_ef10_filter_table = eftp; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_filter_fini( __in efx_nic_t *enp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); if (enp->en_filter.ef_ef10_filter_table != NULL) { EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t), enp->en_filter.ef_ef10_filter_table); } } static __checkReturn efx_rc_t efx_mcdi_filter_op_add( __in efx_nic_t *enp, __in efx_filter_spec_t *spec, __in unsigned int filter_op, __inout ef10_filter_handle_t *handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN, MC_CMD_FILTER_OP_EXT_OUT_LEN)]; efx_rc_t rc; memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FILTER_OP; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN; switch (filter_op) { case MC_CMD_FILTER_OP_IN_OP_REPLACE: MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO, handle->efh_lo); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_HI, handle->efh_hi); /* Fall through */ case MC_CMD_FILTER_OP_IN_OP_INSERT: case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE: MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, filter_op); break; default: EFSYS_ASSERT(0); rc = EINVAL; goto fail1; } MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_MATCH_FIELDS, spec->efs_match_flags); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST, MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE, spec->efs_dmaq_id); #if EFSYS_OPT_RX_SCALE if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) { uint32_t rss_context; if (spec->efs_rss_context == EFX_RSS_CONTEXT_DEFAULT) rss_context = enp->en_rss_context; else rss_context = spec->efs_rss_context; MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_CONTEXT, rss_context); } #endif MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_MODE, spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ? 
MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS : MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_TX_DEST, MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT); if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) { /* * NOTE: Unlike most MCDI requests, the filter fields * are presented in network (big endian) byte order. */ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_MAC), spec->efs_rem_mac, EFX_MAC_ADDR_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_MAC), spec->efs_loc_mac, EFX_MAC_ADDR_LEN); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_SRC_PORT, __CPU_TO_BE_16(spec->efs_rem_port)); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_DST_PORT, __CPU_TO_BE_16(spec->efs_loc_port)); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_ETHER_TYPE, __CPU_TO_BE_16(spec->efs_ether_type)); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_INNER_VLAN, __CPU_TO_BE_16(spec->efs_inner_vid)); MCDI_IN_SET_WORD(req, FILTER_OP_EXT_IN_OUTER_VLAN, __CPU_TO_BE_16(spec->efs_outer_vid)); /* IP protocol (in low byte, high byte is zero) */ MCDI_IN_SET_BYTE(req, FILTER_OP_EXT_IN_IP_PROTO, spec->efs_ip_proto); EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) == MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN); EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) == MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_SRC_IP), &spec->efs_rem_host.eo_byte[0], MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN); memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_DST_IP), &spec->efs_loc_host.eo_byte[0], MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN); /* * On Medford, filters for encapsulated packets match based on * the ether type and IP protocol in the outer frame. In * addition we need to fill in the VNI or VSID type field. */ switch (spec->efs_encap_type) { case EFX_TUNNEL_PROTOCOL_NONE: break; case EFX_TUNNEL_PROTOCOL_VXLAN: case EFX_TUNNEL_PROTOCOL_GENEVE: MCDI_IN_POPULATE_DWORD_1(req, FILTER_OP_EXT_IN_VNI_OR_VSID, FILTER_OP_EXT_IN_VNI_TYPE, spec->efs_encap_type == EFX_TUNNEL_PROTOCOL_VXLAN ? 
MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN : MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE); break; case EFX_TUNNEL_PROTOCOL_NVGRE: MCDI_IN_POPULATE_DWORD_1(req, FILTER_OP_EXT_IN_VNI_OR_VSID, FILTER_OP_EXT_IN_VSID_TYPE, MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE); break; default: EFSYS_ASSERT(0); rc = EINVAL; goto fail2; } } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) { rc = EMSGSIZE; goto fail4; } handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_LO); handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_HI); return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_filter_op_delete( __in efx_nic_t *enp, __in unsigned int filter_op, __inout ef10_filter_handle_t *handle) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN, MC_CMD_FILTER_OP_EXT_OUT_LEN)]; efx_rc_t rc; memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FILTER_OP; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN; switch (filter_op) { case MC_CMD_FILTER_OP_IN_OP_REMOVE: MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, MC_CMD_FILTER_OP_IN_OP_REMOVE); break; case MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE: MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_OP, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); break; default: EFSYS_ASSERT(0); rc = EINVAL; goto fail1; } MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO, handle->efh_lo); MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_HI, handle->efh_hi); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) { rc = EMSGSIZE; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn boolean_t ef10_filter_equal( __in const efx_filter_spec_t *left, __in const efx_filter_spec_t *right) { /* FIXME: Consider rx vs tx filters (look at efs_flags) */ if (left->efs_match_flags != right->efs_match_flags) return (B_FALSE); if (!EFX_OWORD_IS_EQUAL(left->efs_rem_host, right->efs_rem_host)) return (B_FALSE); if (!EFX_OWORD_IS_EQUAL(left->efs_loc_host, right->efs_loc_host)) return (B_FALSE); if (memcmp(left->efs_rem_mac, right->efs_rem_mac, EFX_MAC_ADDR_LEN)) return (B_FALSE); if (memcmp(left->efs_loc_mac, right->efs_loc_mac, EFX_MAC_ADDR_LEN)) return (B_FALSE); if (left->efs_rem_port != right->efs_rem_port) return (B_FALSE); if (left->efs_loc_port != right->efs_loc_port) return (B_FALSE); if (left->efs_inner_vid != right->efs_inner_vid) return (B_FALSE); if (left->efs_outer_vid != right->efs_outer_vid) return (B_FALSE); if (left->efs_ether_type != right->efs_ether_type) return (B_FALSE); if (left->efs_ip_proto != right->efs_ip_proto) return (B_FALSE); if (left->efs_encap_type != right->efs_encap_type) return (B_FALSE); return (B_TRUE); } static __checkReturn boolean_t ef10_filter_same_dest( __in const efx_filter_spec_t *left, __in const efx_filter_spec_t *right) { if ((left->efs_flags & EFX_FILTER_FLAG_RX_RSS) && (right->efs_flags & EFX_FILTER_FLAG_RX_RSS)) { if (left->efs_rss_context == right->efs_rss_context) return (B_TRUE); } else if ((~(left->efs_flags) & EFX_FILTER_FLAG_RX_RSS) && (~(right->efs_flags) & EFX_FILTER_FLAG_RX_RSS)) { if (left->efs_dmaq_id == right->efs_dmaq_id) 
return (B_TRUE); } return (B_FALSE); } static __checkReturn uint32_t ef10_filter_hash( __in efx_filter_spec_t *spec) { EFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t)) == 0); EFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) % sizeof (uint32_t)) == 0); /* * As the area of the efx_filter_spec_t we need to hash is DWORD * aligned and an exact number of DWORDs in size we can use the * optimised efx_hash_dwords() rather than efx_hash_bytes() */ return (efx_hash_dwords((const uint32_t *)&spec->efs_outer_vid, (sizeof (efx_filter_spec_t) - EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) / sizeof (uint32_t), 0)); } /* * Decide whether a filter should be exclusive or else should allow * delivery to additional recipients. Currently we decide that * filters for specific local unicast MAC and IP addresses are * exclusive. */ static __checkReturn boolean_t ef10_filter_is_exclusive( __in efx_filter_spec_t *spec) { if ((spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC) && !EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac)) return (B_TRUE); if ((spec->efs_match_flags & (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV4) && ((spec->efs_loc_host.eo_u8[0] & 0xf) != 0xe)) return (B_TRUE); if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV6) && (spec->efs_loc_host.eo_u8[0] != 0xff)) return (B_TRUE); } return (B_FALSE); } __checkReturn efx_rc_t ef10_filter_restore( __in efx_nic_t *enp) { int tbl_id; efx_filter_spec_t *spec; ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; boolean_t restoring; efsys_lock_state_t state; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) { EFSYS_LOCK(enp->en_eslp, state); spec = ef10_filter_entry_spec(eftp, tbl_id); if (spec == NULL) { restoring = B_FALSE; } else if (ef10_filter_entry_is_busy(eftp, tbl_id)) { /* Ignore busy entries. */ restoring = B_FALSE; } else { ef10_filter_set_entry_busy(eftp, tbl_id); restoring = B_TRUE; } EFSYS_UNLOCK(enp->en_eslp, state); if (restoring == B_FALSE) continue; if (ef10_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_INSERT, &eftp->eft_entry[tbl_id].efe_handle); } else { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE, &eftp->eft_entry[tbl_id].efe_handle); } if (rc != 0) goto fail1; EFSYS_LOCK(enp->en_eslp, state); ef10_filter_set_entry_not_busy(eftp, tbl_id); EFSYS_UNLOCK(enp->en_eslp, state); } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * An arbitrary search limit for the software hash table. As per the linux net * driver. 
*/ #define EF10_FILTER_SEARCH_LIMIT 200 static __checkReturn efx_rc_t ef10_filter_add_internal( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace, __out_opt uint32_t *filter_id) { efx_rc_t rc; ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t *saved_spec; uint32_t hash; unsigned int depth; int ins_index; boolean_t replacing = B_FALSE; unsigned int i; efsys_lock_state_t state; boolean_t locked = B_FALSE; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); hash = ef10_filter_hash(spec); /* * FIXME: Add support for inserting filters of different priorities * and removing lower priority multicast filters (bug 42378) */ /* * Find any existing filters with the same match tuple or * else a free slot to insert at. If any of them are busy, * we have to wait and retry. */ for (;;) { ins_index = -1; depth = 1; EFSYS_LOCK(enp->en_eslp, state); locked = B_TRUE; for (;;) { i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1); saved_spec = ef10_filter_entry_spec(eftp, i); if (!saved_spec) { if (ins_index < 0) { ins_index = i; } } else if (ef10_filter_equal(spec, saved_spec)) { if (ef10_filter_entry_is_busy(eftp, i)) break; if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) { ins_index = i; goto found; } else if (ef10_filter_is_exclusive(spec)) { if (may_replace) { ins_index = i; goto found; } else { rc = EEXIST; goto fail1; } } /* Leave existing */ } /* * Once we reach the maximum search depth, use * the first suitable slot or return EBUSY if * there was none. */ if (depth == EF10_FILTER_SEARCH_LIMIT) { if (ins_index < 0) { rc = EBUSY; goto fail2; } goto found; } depth++; } EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; } found: /* * Create a software table entry if necessary, and mark it * busy. We might yet fail to insert, but any attempt to * insert a conflicting filter while we're waiting for the * firmware must find the busy entry. */ saved_spec = ef10_filter_entry_spec(eftp, ins_index); if (saved_spec) { if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) { /* This is a filter we are refreshing */ ef10_filter_set_entry_not_auto_old(eftp, ins_index); goto out_unlock; } replacing = B_TRUE; } else { EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), saved_spec); if (!saved_spec) { rc = ENOMEM; goto fail3; } *saved_spec = *spec; ef10_filter_set_entry(eftp, ins_index, saved_spec); } ef10_filter_set_entry_busy(eftp, ins_index); EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; /* * On replacing, the filter handle may change after a successful * replace operation.
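 * The MCDI call below therefore writes the handle returned by firmware
 * back into the software table entry; sketching the contract of
 * efx_mcdi_filter_op_add() as used here (hp is an illustrative alias):
 *
 *	ef10_filter_handle_t *hp = &eftp->eft_entry[ins_index].efe_handle;
 *	rc = efx_mcdi_filter_op_add(enp, spec,
 *	    MC_CMD_FILTER_OP_IN_OP_REPLACE, hp);
 *	(on success, hp->efh_lo and hp->efh_hi hold the current handle)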
*/ if (replacing) { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_REPLACE, &eftp->eft_entry[ins_index].efe_handle); } else if (ef10_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_INSERT, &eftp->eft_entry[ins_index].efe_handle); } else { rc = efx_mcdi_filter_op_add(enp, spec, MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE, &eftp->eft_entry[ins_index].efe_handle); } if (rc != 0) goto fail4; EFSYS_LOCK(enp->en_eslp, state); locked = B_TRUE; if (replacing) { /* Update the fields that may differ */ saved_spec->efs_priority = spec->efs_priority; saved_spec->efs_flags = spec->efs_flags; saved_spec->efs_rss_context = spec->efs_rss_context; saved_spec->efs_dmaq_id = spec->efs_dmaq_id; } ef10_filter_set_entry_not_busy(eftp, ins_index); out_unlock: EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; if (filter_id) *filter_id = ins_index; return (0); fail4: EFSYS_PROBE(fail4); if (!replacing) { EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), saved_spec); saved_spec = NULL; } ef10_filter_set_entry_not_busy(eftp, ins_index); ef10_filter_set_entry(eftp, ins_index, NULL); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); if (locked) EFSYS_UNLOCK(enp->en_eslp, state); return (rc); } __checkReturn efx_rc_t ef10_filter_add( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace) { efx_rc_t rc; rc = ef10_filter_add_internal(enp, spec, may_replace, NULL); if (rc != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_delete_internal( __in efx_nic_t *enp, __in uint32_t filter_id) { efx_rc_t rc; ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t *spec; efsys_lock_state_t state; uint32_t filter_idx = filter_id % EFX_EF10_FILTER_TBL_ROWS; /* * Find the software table entry and mark it busy. Don't * remove it yet; any attempt to update while we're waiting * for the firmware must find the busy entry. * * FIXME: What if the busy flag is never cleared? */ EFSYS_LOCK(enp->en_eslp, state); while (ef10_filter_entry_is_busy(table, filter_idx)) { EFSYS_UNLOCK(enp->en_eslp, state); EFSYS_SPIN(1); EFSYS_LOCK(enp->en_eslp, state); } if ((spec = ef10_filter_entry_spec(table, filter_idx)) != NULL) { ef10_filter_set_entry_busy(table, filter_idx); } EFSYS_UNLOCK(enp->en_eslp, state); if (spec == NULL) { rc = ENOENT; goto fail1; } /* * Try to remove the hardware filter. This may fail if the MC has * rebooted (which frees all hardware filter resources). 
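 * An MC reboot invalidates every previously returned filter handle, so
 * the software table entry is freed below regardless of the MCDI
 * result, and only then is the hardware status propagated to the
 * caller.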
*/ if (ef10_filter_is_exclusive(spec)) { rc = efx_mcdi_filter_op_delete(enp, MC_CMD_FILTER_OP_IN_OP_REMOVE, &table->eft_entry[filter_idx].efe_handle); } else { rc = efx_mcdi_filter_op_delete(enp, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE, &table->eft_entry[filter_idx].efe_handle); } /* Free the software table entry */ EFSYS_LOCK(enp->en_eslp, state); ef10_filter_set_entry_not_busy(table, filter_idx); ef10_filter_set_entry(table, filter_idx, NULL); EFSYS_UNLOCK(enp->en_eslp, state); EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec); /* Check result of hardware filter removal */ if (rc != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_filter_delete( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec) { efx_rc_t rc; ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t *saved_spec; unsigned int hash; unsigned int depth; unsigned int i; efsys_lock_state_t state; boolean_t locked = B_FALSE; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || enp->en_family == EFX_FAMILY_MEDFORD); hash = ef10_filter_hash(spec); EFSYS_LOCK(enp->en_eslp, state); locked = B_TRUE; depth = 1; for (;;) { i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1); saved_spec = ef10_filter_entry_spec(table, i); if (saved_spec && ef10_filter_equal(spec, saved_spec) && ef10_filter_same_dest(spec, saved_spec)) { break; } if (depth == EF10_FILTER_SEARCH_LIMIT) { rc = ENOENT; goto fail1; } depth++; } EFSYS_UNLOCK(enp->en_eslp, state); locked = B_FALSE; rc = ef10_filter_delete_internal(enp, i); if (rc != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); if (locked) EFSYS_UNLOCK(enp->en_eslp, state); return (rc); } static __checkReturn efx_rc_t efx_mcdi_get_parser_disp_info( __in efx_nic_t *enp, __out_ecount(buffer_length) uint32_t *buffer, __in size_t buffer_length, __out size_t *list_lengthp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)]; size_t matches_count; size_t list_size; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX; MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP, MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } matches_count = MCDI_OUT_DWORD(req, GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES); if (req.emr_out_length_used < MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(matches_count)) { rc = EMSGSIZE; goto fail2; } *list_lengthp = matches_count; if (buffer_length < matches_count) { rc = ENOSPC; goto fail3; } /* * Check that the elements in the list in the MCDI response are the size * we expect, so we can just copy them directly. Any conversion of the * flags is handled by the caller. 
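 * (Each supported match is a single 32-bit mask of filter match flags,
 * so no per-element byte manipulation is needed; the static assert
 * below only pins the wire element size at compile time.)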
*/ EFX_STATIC_ASSERT(sizeof (uint32_t) == MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN); list_size = matches_count * MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN; memcpy(buffer, MCDI_OUT2(req, uint32_t, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES), list_size); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_filter_supported_filters( __in efx_nic_t *enp, __out_ecount(buffer_length) uint32_t *buffer, __in size_t buffer_length, __out size_t *list_lengthp) { size_t mcdi_list_length; size_t list_length; uint32_t i; efx_rc_t rc; - uint32_t all_filter_flags = + efx_filter_match_flags_t all_filter_flags = (EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_REM_PORT | EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_INNER_VID | EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_UNKNOWN_MCAST_DST | EFX_FILTER_MATCH_UNKNOWN_UCAST_DST); rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length, &mcdi_list_length); if (rc != 0) { if (rc == ENOSPC) { /* Pass through mcdi_list_length for the list length */ *list_lengthp = mcdi_list_length; } goto fail1; } /* * The static assertions in ef10_filter_init() ensure that the values of * the EFX_FILTER_MATCH flags match those used by MCDI, so they don't * need to be converted. * * In case support is added to MCDI for additional flags, remove any * matches from the list which include flags we don't support. The order * of the matches is preserved as they are ordered from highest to * lowest priority. */ EFSYS_ASSERT(mcdi_list_length <= buffer_length); list_length = 0; for (i = 0; i < mcdi_list_length; i++) { if ((buffer[i] & ~all_filter_flags) == 0) { buffer[list_length] = buffer[i]; list_length++; } } *list_lengthp = list_length; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_unicast( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *addr, __in efx_filter_flags_t filter_flags) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; efx_rc_t rc; /* Insert the filter for the local station address */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]); if (rc != 0) goto fail1; eftp->eft_unicst_filter_count++; EFSYS_ASSERT(eftp->eft_unicst_filter_count <= EFX_EF10_FILTER_UNICAST_FILTERS_MAX); return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_all_unicast( __in efx_nic_t *enp, __in efx_filter_flags_t filter_flags) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; efx_rc_t rc; /* Insert the unknown unicast filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); efx_filter_spec_set_uc_def(&spec); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]); if (rc != 0) goto fail1; eftp->eft_unicst_filter_count++; EFSYS_ASSERT(eftp->eft_unicst_filter_count <= EFX_EF10_FILTER_UNICAST_FILTERS_MAX); return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t 
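/*
 * Sketch of the rollback contract used by the function below: indexes are
 * recorded as insertions succeed, so a failure can undo them in LIFO
 * order (condensed from the rollback label in the function body):
 *
 *	i = filter_count;
 *	while (i--)
 *		(void) ef10_filter_delete_internal(enp,
 *		    eftp->eft_mulcst_filter_indexes[i]);
 */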
ef10_filter_insert_multicast_list( __in efx_nic_t *enp, __in boolean_t mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in uint32_t count, __in efx_filter_flags_t filter_flags, __in boolean_t rollback) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; uint8_t addr[6]; uint32_t i; uint32_t filter_index; uint32_t filter_count; efx_rc_t rc; if (mulcst == B_FALSE) count = 0; if (count + (brdcst ? 1 : 0) > EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) { /* Too many MAC addresses */ rc = EINVAL; goto fail1; } /* Insert/renew multicast address list filters */ filter_count = 0; for (i = 0; i < count; i++) { efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, &addrs[i * EFX_MAC_ADDR_LEN]); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &filter_index); if (rc == 0) { eftp->eft_mulcst_filter_indexes[filter_count] = filter_index; filter_count++; } else if (rollback == B_TRUE) { /* Only stop upon failure if told to rollback */ goto rollback; } } if (brdcst == B_TRUE) { /* Insert/renew broadcast address filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); EFX_MAC_BROADCAST_ADDR_SET(addr); efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &filter_index); if (rc == 0) { eftp->eft_mulcst_filter_indexes[filter_count] = filter_index; filter_count++; } else if (rollback == B_TRUE) { /* Only stop upon failure if told to rollback */ goto rollback; } } eftp->eft_mulcst_filter_count = filter_count; eftp->eft_using_all_mulcst = B_FALSE; return (0); rollback: /* Remove any filters we have inserted */ i = filter_count; while (i--) { (void) ef10_filter_delete_internal(enp, eftp->eft_mulcst_filter_indexes[i]); } eftp->eft_mulcst_filter_count = 0; fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t ef10_filter_insert_all_multicast( __in efx_nic_t *enp, __in efx_filter_flags_t filter_flags) { ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table; efx_filter_spec_t spec; efx_rc_t rc; /* Insert the unknown multicast filter */ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, eftp->eft_default_rxq); efx_filter_spec_set_mc_def(&spec); rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &eftp->eft_mulcst_filter_indexes[0]); if (rc != 0) goto fail1; eftp->eft_mulcst_filter_count = 1; eftp->eft_using_all_mulcst = B_TRUE; /* * FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic. 
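 * (The broadcast address is itself a multicast address, so the unknown
 * multicast filter inserted above matches broadcast frames too; until
 * such a drop filter exists, broadcast traffic is delivered even when
 * brdcst == B_FALSE.)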
*/ return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } typedef struct ef10_filter_encap_entry_s { uint16_t ether_type; efx_tunnel_protocol_t encap_type; uint32_t inner_frame_match; } ef10_filter_encap_entry_t; -#define EF10_ENCAP_FILTER_ENTRY(ipv, encap_type, inner_frame_match) \ - { EFX_ETHER_TYPE_##ipv, EFX_TUNNEL_PROTOCOL_##encap_type, \ +#define EF10_ENCAP_FILTER_ENTRY(ipv, encap_type, inner_frame_match) \ + { EFX_ETHER_TYPE_##ipv, EFX_TUNNEL_PROTOCOL_##encap_type, \ EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_##inner_frame_match } static ef10_filter_encap_entry_t ef10_filter_encap_list[] = { EF10_ENCAP_FILTER_ENTRY(IPV4, VXLAN, UCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV4, VXLAN, MCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV6, VXLAN, UCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV6, VXLAN, MCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV4, GENEVE, UCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV4, GENEVE, MCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV6, GENEVE, UCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV6, GENEVE, MCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV4, NVGRE, UCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV4, NVGRE, MCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV6, NVGRE, UCAST_DST), EF10_ENCAP_FILTER_ENTRY(IPV6, NVGRE, MCAST_DST), }; #undef EF10_ENCAP_FILTER_ENTRY static __checkReturn efx_rc_t ef10_filter_insert_encap_filters( __in efx_nic_t *enp, __in boolean_t mulcst, __in efx_filter_flags_t filter_flags) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; uint32_t i; efx_rc_t rc; EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(ef10_filter_encap_list) <= EFX_ARRAY_SIZE(table->eft_encap_filter_indexes)); /* * On Medford, full-featured firmware can identify packets as being * tunnel encapsulated, even if no encapsulated packet offloads are in * use. When packets are identified as such, ordinary filters are not * applied, only ones specific to encapsulated packets. Hence we need to * insert filters for encapsulated packets in order to receive them. * * Separate filters need to be inserted for each ether type, * encapsulation type, and inner frame type (unicast or multicast). To * keep things simple and reduce the number of filters needed, catch-all * filters for all combinations of types are inserted, even if * all_unicst or all_mulcst have not been set. (These catch-all filters * may well, however, fail to insert on unprivileged functions.) */ table->eft_encap_filter_count = 0; for (i = 0; i < EFX_ARRAY_SIZE(ef10_filter_encap_list); i++) { efx_filter_spec_t spec; ef10_filter_encap_entry_t *encap_filter = &ef10_filter_encap_list[i]; /* * Skip multicast filters if we've not been asked for * any multicast traffic. 
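 * Unicast inner-frame entries are still inserted, so encapsulated
 * unicast traffic keeps flowing whatever the mulcst setting.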
*/ if ((mulcst == B_FALSE) && (encap_filter->inner_frame_match == - EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST)) - continue; + EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST)) + continue; efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, table->eft_default_rxq); efx_filter_spec_set_ether_type(&spec, encap_filter->ether_type); rc = efx_filter_spec_set_encap_type(&spec, encap_filter->encap_type, encap_filter->inner_frame_match); if (rc != 0) goto fail1; rc = ef10_filter_add_internal(enp, &spec, B_TRUE, &table->eft_encap_filter_indexes[ table->eft_encap_filter_count]); if (rc != 0) { if (rc != EACCES) goto fail2; } else { table->eft_encap_filter_count++; } } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static void ef10_filter_remove_old( __in efx_nic_t *enp) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; uint32_t i; for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) { if (ef10_filter_entry_is_auto_old(table, i)) { (void) ef10_filter_delete_internal(enp, i); } } } static __checkReturn efx_rc_t ef10_filter_get_workarounds( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &enp->en_nic_cfg; uint32_t implemented = 0; uint32_t enabled = 0; efx_rc_t rc; rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled); if (rc == 0) { /* Check if chained multicast filter support is enabled */ if (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807) encp->enc_bug26807_workaround = B_TRUE; else encp->enc_bug26807_workaround = B_FALSE; } else if (rc == ENOTSUP) { /* * Firmware is too old to support GET_WORKAROUNDS, and support * for this workaround was implemented later. */ encp->enc_bug26807_workaround = B_FALSE; } else { goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Reconfigure all filters. * If all_unicst and/or all mulcst filters cannot be applied then * return ENOTSUP (Note the filters for the specified addresses are * still applied in this case). */ __checkReturn efx_rc_t ef10_filter_reconfigure( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *mac_addr, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in uint32_t count) { efx_nic_cfg_t *encp = &enp->en_nic_cfg; ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; efx_filter_flags_t filter_flags; unsigned int i; efx_rc_t all_unicst_rc = 0; efx_rc_t all_mulcst_rc = 0; efx_rc_t rc; if (table->eft_default_rxq == NULL) { /* * Filters direct traffic to the default RXQ, and so cannot be * inserted until it is available. Any currently configured * filters must be removed (ignore errors in case the MC * has rebooted, which removes hardware filters). 
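 * The invariant maintained below is that no automatic filters remain
 * while eft_default_rxq is NULL: each index list is torn down and its
 * count reset to zero before returning.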
*/ for (i = 0; i < table->eft_unicst_filter_count; i++) { (void) ef10_filter_delete_internal(enp, table->eft_unicst_filter_indexes[i]); } table->eft_unicst_filter_count = 0; for (i = 0; i < table->eft_mulcst_filter_count; i++) { (void) ef10_filter_delete_internal(enp, table->eft_mulcst_filter_indexes[i]); } table->eft_mulcst_filter_count = 0; for (i = 0; i < table->eft_encap_filter_count; i++) { (void) ef10_filter_delete_internal(enp, table->eft_encap_filter_indexes[i]); } table->eft_encap_filter_count = 0; return (0); } if (table->eft_using_rss) filter_flags = EFX_FILTER_FLAG_RX_RSS; else filter_flags = 0; /* Mark old filters which may need to be removed */ for (i = 0; i < table->eft_unicst_filter_count; i++) { ef10_filter_set_entry_auto_old(table, table->eft_unicst_filter_indexes[i]); } for (i = 0; i < table->eft_mulcst_filter_count; i++) { ef10_filter_set_entry_auto_old(table, table->eft_mulcst_filter_indexes[i]); } for (i = 0; i < table->eft_encap_filter_count; i++) { ef10_filter_set_entry_auto_old(table, table->eft_encap_filter_indexes[i]); } /* * Insert or renew unicast filters. * * Firmware does not perform chaining on unicast filters. As traffic is * therefore only delivered to the first matching filter, we should * always insert the specific filter for our MAC address, to try to * ensure we get that traffic. * * (If the filter for our MAC address has already been inserted by * another function, we won't receive traffic sent to us, even if we * insert a unicast mismatch filter. To prevent traffic stealing, this * therefore relies on the privilege model only allowing functions to * insert filters for their own MAC address unless explicitly given * additional privileges by the user. This also means that, even on a * privileged function, inserting a unicast mismatch filter may not * catch all traffic in multi PCI function scenarios.) */ table->eft_unicst_filter_count = 0; rc = ef10_filter_insert_unicast(enp, mac_addr, filter_flags); if (all_unicst || (rc != 0)) { all_unicst_rc = ef10_filter_insert_all_unicast(enp, filter_flags); if ((rc != 0) && (all_unicst_rc != 0)) goto fail1; } /* * WORKAROUND_BUG26807 controls firmware support for chained multicast * filters, and can only be enabled or disabled when the hardware filter * table is empty. * * Chained multicast filters require support from the datapath firmware, * and may not be available (e.g. low-latency variants or old Huntington * firmware). * * Firmware will reset (FLR) functions which have inserted filters in * the hardware filter table when the workaround is enabled/disabled. * Functions without any hardware filters are not reset. * * Re-check if the workaround is enabled after adding unicast hardware * filters. This ensures that encp->enc_bug26807_workaround matches the * firmware state, and that later changes to enable/disable the * workaround will result in this function seeing a reset (FLR). * * In common-code drivers, we only support multiple PCI function * scenarios with firmware that supports multicast chaining, so we can * assume it is enabled for such cases and hence simplify the filter * insertion logic. Firmware that does not support multicast chaining * does not support multiple PCI function configurations either, so * filter insertion is much simpler and the same strategies can still be * used.
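 *
 * For illustration, the multicast strategy implemented below condenses
 * to roughly the following (a sketch of the code that follows, not a
 * separate implementation):
 *
 *	if (chaining enabled and all_mulcst is toggling)
 *		remove old multicast filters first (avoids duplicates);
 *	if (all_mulcst)
 *		insert all-multicast, falling back to the address list
 *		without rollback;
 *	else
 *		insert the address list with rollback, falling back to
 *		all-multicast, then to the list without rollback;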
*/ if ((rc = ef10_filter_get_workarounds(enp)) != 0) goto fail2; if ((table->eft_using_all_mulcst != all_mulcst) && (encp->enc_bug26807_workaround == B_TRUE)) { /* * Multicast filter chaining is enabled, so traffic that matches * more than one multicast filter will be replicated and * delivered to multiple recipients. To avoid this duplicate * delivery, remove old multicast filters before inserting new * multicast filters. */ ef10_filter_remove_old(enp); } /* Insert or renew multicast filters */ if (all_mulcst == B_TRUE) { /* * Insert the all multicast filter. If that fails, try to insert * all of our multicast filters (but without rollback on * failure). */ all_mulcst_rc = ef10_filter_insert_all_multicast(enp, filter_flags); if (all_mulcst_rc != 0) { rc = ef10_filter_insert_multicast_list(enp, B_TRUE, brdcst, addrs, count, filter_flags, B_FALSE); if (rc != 0) goto fail3; } } else { /* * Insert filters for multicast addresses. * If any insertion fails, then rollback and try to insert the * all multicast filter instead. * If that also fails, try to insert all of the multicast * filters (but without rollback on failure). */ rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst, addrs, count, filter_flags, B_TRUE); if (rc != 0) { if ((table->eft_using_all_mulcst == B_FALSE) && (encp->enc_bug26807_workaround == B_TRUE)) { /* * Multicast filter chaining is on, so remove * old filters before inserting the multicast * all filter to avoid duplicate delivery caused * by packets matching multiple filters. */ ef10_filter_remove_old(enp); } rc = ef10_filter_insert_all_multicast(enp, filter_flags); if (rc != 0) { rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst, addrs, count, filter_flags, B_FALSE); if (rc != 0) goto fail4; } } } if (encp->enc_tunnel_encapsulations_supported != 0) { /* Try to insert filters for encapsulated packets. 
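 * The result is deliberately ignored: encap filters are best effort,
 * and ef10_filter_insert_encap_filters() already tolerates EACCES from
 * unprivileged functions.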
*/ (void) ef10_filter_insert_encap_filters(enp, mulcst || all_mulcst || brdcst, filter_flags); } /* Remove old filters which were not renewed */ ef10_filter_remove_old(enp); /* report if any optional flags were rejected */ if (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) || ((all_mulcst != B_FALSE) && (all_mulcst_rc != 0))) { rc = ENOTSUP; } return (rc); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); /* Clear auto old flags */ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) { if (ef10_filter_entry_is_auto_old(table, i)) { ef10_filter_set_entry_not_auto_old(table, i); } } return (rc); } void ef10_filter_get_default_rxq( __in efx_nic_t *enp, __out efx_rxq_t **erpp, __out boolean_t *using_rss) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; *erpp = table->eft_default_rxq; *using_rss = table->eft_using_rss; } void ef10_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; #if EFSYS_OPT_RX_SCALE EFSYS_ASSERT((using_rss == B_FALSE) || (enp->en_rss_context != EF10_RSS_CONTEXT_INVALID)); table->eft_using_rss = using_rss; #else EFSYS_ASSERT(using_rss == B_FALSE); table->eft_using_rss = B_FALSE; #endif table->eft_default_rxq = erp; } void ef10_filter_default_rxq_clear( __in efx_nic_t *enp) { ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table; table->eft_default_rxq = NULL; table->eft_using_rss = B_FALSE; } #endif /* EFSYS_OPT_FILTER */ #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: projects/clang700-import/sys/dev/sfxge/common/ef10_impl.h =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/ef10_impl.h (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/ef10_impl.h (revision 340918) @@ -1,1219 +1,1215 @@ /*- * Copyright (c) 2015-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
* * $FreeBSD$ */ #ifndef _SYS_EF10_IMPL_H #define _SYS_EF10_IMPL_H #ifdef __cplusplus extern "C" { #endif #if (EFSYS_OPT_HUNTINGTON && EFSYS_OPT_MEDFORD) #define EF10_MAX_PIOBUF_NBUFS MAX(HUNT_PIOBUF_NBUFS, MEDFORD_PIOBUF_NBUFS) #elif EFSYS_OPT_HUNTINGTON #define EF10_MAX_PIOBUF_NBUFS HUNT_PIOBUF_NBUFS #elif EFSYS_OPT_MEDFORD #define EF10_MAX_PIOBUF_NBUFS MEDFORD_PIOBUF_NBUFS #endif /* * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could * possibly be increased, or the write size reported by newer firmware used * instead. */ #define EF10_NVRAM_CHUNK 0x80 -/* Alignment requirement for value written to RX WPTR: - * the WPTR must be aligned to an 8 descriptor boundary +/* + * Alignment requirement for value written to RX WPTR: the WPTR must be aligned + * to an 8 descriptor boundary. */ #define EF10_RX_WPTR_ALIGN 8 /* * Max byte offset into the packet the TCP header must start for the hardware * to be able to parse the packet correctly. */ #define EF10_TCP_HEADER_OFFSET_LIMIT 208 /* Invalid RSS context handle */ #define EF10_RSS_CONTEXT_INVALID (0xffffffff) /* EV */ __checkReturn efx_rc_t ef10_ev_init( __in efx_nic_t *enp); void ef10_ev_fini( __in efx_nic_t *enp); __checkReturn efx_rc_t ef10_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, __in uint32_t us, __in uint32_t flags, __in efx_evq_t *eep); void ef10_ev_qdestroy( __in efx_evq_t *eep); __checkReturn efx_rc_t ef10_ev_qprime( __in efx_evq_t *eep, __in unsigned int count); void ef10_ev_qpost( __in efx_evq_t *eep, __in uint16_t data); __checkReturn efx_rc_t ef10_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us); #if EFSYS_OPT_QSTATS void ef10_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); #endif /* EFSYS_OPT_QSTATS */ void ef10_ev_rxlabel_init( __in efx_evq_t *eep, __in efx_rxq_t *erp, __in unsigned int label, __in efx_rxq_type_t type); void ef10_ev_rxlabel_fini( __in efx_evq_t *eep, __in unsigned int label); /* INTR */ __checkReturn efx_rc_t ef10_intr_init( __in efx_nic_t *enp, __in efx_intr_type_t type, __in efsys_mem_t *esmp); void ef10_intr_enable( __in efx_nic_t *enp); void ef10_intr_disable( __in efx_nic_t *enp); void ef10_intr_disable_unlocked( __in efx_nic_t *enp); __checkReturn efx_rc_t ef10_intr_trigger( __in efx_nic_t *enp, __in unsigned int level); void ef10_intr_status_line( __in efx_nic_t *enp, __out boolean_t *fatalp, __out uint32_t *qmaskp); void ef10_intr_status_message( __in efx_nic_t *enp, __in unsigned int message, __out boolean_t *fatalp); void ef10_intr_fatal( __in efx_nic_t *enp); void ef10_intr_fini( __in efx_nic_t *enp); /* NIC */ extern __checkReturn efx_rc_t ef10_nic_probe( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_nic_set_drv_limits( __inout efx_nic_t *enp, __in efx_drv_limits_t *edlp); extern __checkReturn efx_rc_t ef10_nic_get_vi_pool( __in efx_nic_t *enp, __out uint32_t *vi_countp); extern __checkReturn efx_rc_t ef10_nic_get_bar_region( __in efx_nic_t *enp, __in efx_nic_region_t region, __out uint32_t *offsetp, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nic_reset( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_nic_init( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG extern __checkReturn efx_rc_t ef10_nic_register_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern void ef10_nic_fini( __in efx_nic_t *enp); extern void ef10_nic_unprobe( __in efx_nic_t *enp); /* MAC */ extern __checkReturn efx_rc_t 
ef10_mac_poll( __in efx_nic_t *enp, __out efx_link_mode_t *link_modep); extern __checkReturn efx_rc_t ef10_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp); extern __checkReturn efx_rc_t ef10_mac_addr_set( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mac_pdu_set( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mac_pdu_get( __in efx_nic_t *enp, __out size_t *pdu); extern __checkReturn efx_rc_t ef10_mac_reconfigure( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mac_multicast_list_set( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mac_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss); extern void ef10_mac_filter_default_rxq_clear( __in efx_nic_t *enp); #if EFSYS_OPT_LOOPBACK extern __checkReturn efx_rc_t ef10_mac_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t loopback_type); #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS extern __checkReturn efx_rc_t ef10_mac_stats_get_mask( __in efx_nic_t *enp, __inout_bcount(mask_size) uint32_t *maskp, __in size_t mask_size); extern __checkReturn efx_rc_t ef10_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp); #endif /* EFSYS_OPT_MAC_STATS */ /* MCDI */ #if EFSYS_OPT_MCDI extern __checkReturn efx_rc_t ef10_mcdi_init( __in efx_nic_t *enp, __in const efx_mcdi_transport_t *mtp); extern void ef10_mcdi_fini( __in efx_nic_t *enp); extern void ef10_mcdi_send_request( __in efx_nic_t *enp, __in_bcount(hdr_len) void *hdrp, __in size_t hdr_len, __in_bcount(sdu_len) void *sdup, __in size_t sdu_len); extern __checkReturn boolean_t ef10_mcdi_poll_response( __in efx_nic_t *enp); extern void ef10_mcdi_read_response( __in efx_nic_t *enp, __out_bcount(length) void *bufferp, __in size_t offset, __in size_t length); extern efx_rc_t ef10_mcdi_poll_reboot( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_mcdi_feature_supported( __in efx_nic_t *enp, __in efx_mcdi_feature_id_t id, __out boolean_t *supportedp); extern void ef10_mcdi_get_timeout( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __out uint32_t *timeoutp); #endif /* EFSYS_OPT_MCDI */ /* NVRAM */ #if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD extern __checkReturn efx_rc_t ef10_nvram_buf_read_tlv( __in efx_nic_t *enp, __in_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __deref_out_bcount_opt(*sizep) caddr_t *datap, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nvram_buf_write_tlv( __inout_bcount(partn_size) caddr_t partn_data, __in size_t partn_size, __in uint32_t tag, __in_bcount(tag_size) caddr_t tag_data, __in size_t tag_size, __out size_t *total_lengthp); extern __checkReturn efx_rc_t ef10_nvram_partn_read_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __deref_out_bcount_opt(*sizep) caddr_t *datap, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nvram_partn_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_nvram_partn_write_segment_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size, __in boolean_t all_segments); extern __checkReturn efx_rc_t ef10_nvram_partn_lock( __in efx_nic_t *enp, __in uint32_t partn); extern __checkReturn efx_rc_t ef10_nvram_partn_unlock( __in efx_nic_t *enp, __in uint32_t partn, __out_opt uint32_t *resultp); 
#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */ #if EFSYS_OPT_NVRAM #if EFSYS_OPT_DIAG extern __checkReturn efx_rc_t ef10_nvram_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern __checkReturn efx_rc_t ef10_nvram_type_to_partn( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *partnp); extern __checkReturn efx_rc_t ef10_nvram_partn_size( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nvram_partn_rw_start( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *chunk_sizep); extern __checkReturn efx_rc_t ef10_nvram_partn_read_mode( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size, __in uint32_t mode); extern __checkReturn efx_rc_t ef10_nvram_partn_read( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_nvram_partn_read_backup( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_nvram_partn_erase( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __in size_t size); extern __checkReturn efx_rc_t ef10_nvram_partn_write( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_nvram_partn_rw_finish( __in efx_nic_t *enp, __in uint32_t partn, __out_opt uint32_t *verify_resultp); extern __checkReturn efx_rc_t ef10_nvram_partn_get_version( __in efx_nic_t *enp, __in uint32_t partn, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]); extern __checkReturn efx_rc_t ef10_nvram_partn_set_version( __in efx_nic_t *enp, __in uint32_t partn, __in_ecount(4) uint16_t version[4]); extern __checkReturn efx_rc_t ef10_nvram_buffer_validate( __in efx_nic_t *enp, __in uint32_t partn, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size); extern __checkReturn efx_rc_t ef10_nvram_buffer_create( __in efx_nic_t *enp, __in uint16_t partn_type, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size); extern __checkReturn efx_rc_t ef10_nvram_buffer_find_item_start( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, - __out uint32_t *startp - ); + __out uint32_t *startp); extern __checkReturn efx_rc_t ef10_nvram_buffer_find_end( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, - __out uint32_t *endp - ); + __out uint32_t *endp); extern __checkReturn __success(return != B_FALSE) boolean_t ef10_nvram_buffer_find_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, - __out uint32_t *lengthp - ); + __out uint32_t *lengthp); extern __checkReturn efx_rc_t ef10_nvram_buffer_get_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(item_max_size, *lengthp) caddr_t itemp, __in size_t item_max_size, - __out uint32_t *lengthp - ); + __out uint32_t *lengthp); extern __checkReturn efx_rc_t ef10_nvram_buffer_insert_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, - __out uint32_t *lengthp - ); + __out uint32_t *lengthp); extern __checkReturn efx_rc_t ef10_nvram_buffer_delete_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t 
buffer_size, __in uint32_t offset, __in uint32_t length, - __in uint32_t end - ); + __in uint32_t end); extern __checkReturn efx_rc_t ef10_nvram_buffer_finish( __in_bcount(buffer_size) caddr_t bufferp, - __in size_t buffer_size - ); + __in size_t buffer_size); #endif /* EFSYS_OPT_NVRAM */ /* PHY */ typedef struct ef10_link_state_s { uint32_t els_adv_cap_mask; uint32_t els_lp_cap_mask; unsigned int els_fcntl; efx_link_mode_t els_link_mode; #if EFSYS_OPT_LOOPBACK efx_loopback_type_t els_loopback; #endif boolean_t els_mac_up; } ef10_link_state_t; extern void ef10_phy_link_ev( __in efx_nic_t *enp, __in efx_qword_t *eqp, __out efx_link_mode_t *link_modep); extern __checkReturn efx_rc_t ef10_phy_get_link( __in efx_nic_t *enp, __out ef10_link_state_t *elsp); extern __checkReturn efx_rc_t ef10_phy_power( __in efx_nic_t *enp, __in boolean_t on); extern __checkReturn efx_rc_t ef10_phy_reconfigure( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_phy_verify( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip); #if EFSYS_OPT_PHY_STATS extern __checkReturn efx_rc_t ef10_phy_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat); #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_BIST extern __checkReturn efx_rc_t ef10_bist_enable_offline( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type); extern __checkReturn efx_rc_t ef10_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t *resultp, __out_opt __drv_when(count > 0, __notnull) uint32_t *value_maskp, __out_ecount_opt(count) __drv_when(count > 0, __notnull) unsigned long *valuesp, __in size_t count); extern void ef10_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type); #endif /* EFSYS_OPT_BIST */ /* TX */ extern __checkReturn efx_rc_t ef10_tx_init( __in efx_nic_t *enp); extern void ef10_tx_fini( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __in efx_txq_t *etp, __out unsigned int *addedp); extern void ef10_tx_qdestroy( __in efx_txq_t *etp); -extern __checkReturn efx_rc_t +extern __checkReturn efx_rc_t ef10_tx_qpost( - __in efx_txq_t *etp, - __in_ecount(n) efx_buffer_t *eb, - __in unsigned int n, - __in unsigned int completed, - __inout unsigned int *addedp); + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_buffer_t *ebp, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp); extern void ef10_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed); #if EFSYS_OPT_RX_PACKED_STREAM extern void ef10_rx_qpush_ps_credits( __in efx_rxq_t *erp); extern __checkReturn uint8_t * ef10_rx_qps_packet_info( __in efx_rxq_t *erp, __in uint8_t *buffer, __in uint32_t buffer_length, __in uint32_t current_offset, __out uint16_t *lengthp, __out uint32_t *next_offsetp, __out uint32_t *timestamp); #endif extern __checkReturn efx_rc_t ef10_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns); extern __checkReturn efx_rc_t ef10_tx_qflush( __in efx_txq_t *etp); extern void ef10_tx_qenable( __in efx_txq_t *etp); extern __checkReturn efx_rc_t ef10_tx_qpio_enable( __in efx_txq_t *etp); extern void ef10_tx_qpio_disable( __in efx_txq_t *etp); extern __checkReturn efx_rc_t ef10_tx_qpio_write( __in 
efx_txq_t *etp, __in_ecount(buf_length) uint8_t *buffer, __in size_t buf_length, __in size_t pio_buf_offset); extern __checkReturn efx_rc_t ef10_tx_qpio_post( __in efx_txq_t *etp, __in size_t pkt_length, __in unsigned int completed, __inout unsigned int *addedp); extern __checkReturn efx_rc_t ef10_tx_qdesc_post( __in efx_txq_t *etp, __in_ecount(n) efx_desc_t *ed, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); extern void ef10_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp); extern void ef10_tx_qdesc_tso_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint8_t tcp_flags, __out efx_desc_t *edp); extern void ef10_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint16_t tcp_mss, __out_ecount(count) efx_desc_t *edp, __in int count); extern void ef10_tx_qdesc_vlantci_create( __in efx_txq_t *etp, __in uint16_t vlan_tci, __out efx_desc_t *edp); #if EFSYS_OPT_QSTATS extern void ef10_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat); #endif /* EFSYS_OPT_QSTATS */ typedef uint32_t efx_piobuf_handle_t; -#define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t) -1) +#define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t)-1) extern __checkReturn efx_rc_t ef10_nic_pio_alloc( __inout efx_nic_t *enp, __out uint32_t *bufnump, __out efx_piobuf_handle_t *handlep, __out uint32_t *blknump, __out uint32_t *offsetp, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_nic_pio_free( __inout efx_nic_t *enp, __in uint32_t bufnum, __in uint32_t blknum); extern __checkReturn efx_rc_t ef10_nic_pio_link( __inout efx_nic_t *enp, __in uint32_t vi_index, __in efx_piobuf_handle_t handle); extern __checkReturn efx_rc_t ef10_nic_pio_unlink( __inout efx_nic_t *enp, __in uint32_t vi_index); /* VPD */ #if EFSYS_OPT_VPD extern __checkReturn efx_rc_t ef10_vpd_init( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_vpd_size( __in efx_nic_t *enp, __out size_t *sizep); extern __checkReturn efx_rc_t ef10_vpd_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_vpd_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_vpd_reinit( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t ef10_vpd_get( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __inout efx_vpd_value_t *evvp); extern __checkReturn efx_rc_t ef10_vpd_set( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp); extern __checkReturn efx_rc_t ef10_vpd_next( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_value_t *evvp, __inout unsigned int *contp); extern __checkReturn efx_rc_t ef10_vpd_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern void ef10_vpd_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_VPD */ /* RX */ extern __checkReturn efx_rc_t ef10_rx_init( __in efx_nic_t *enp); #if EFSYS_OPT_RX_SCATTER extern __checkReturn efx_rc_t ef10_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size); #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE extern __checkReturn efx_rc_t ef10_rx_scale_context_alloc( __in efx_nic_t *enp, __in efx_rx_scale_context_type_t type, __in uint32_t num_queues, __out uint32_t *rss_contextp); 
extern __checkReturn efx_rc_t ef10_rx_scale_context_free( __in efx_nic_t *enp, __in uint32_t rss_context); extern __checkReturn efx_rc_t ef10_rx_scale_mode_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert); extern __checkReturn efx_rc_t ef10_rx_scale_key_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) uint8_t *key, __in size_t n); extern __checkReturn efx_rc_t ef10_rx_scale_tbl_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) unsigned int *table, __in size_t n); extern __checkReturn uint32_t ef10_rx_prefix_hash( __in efx_nic_t *enp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer); #endif /* EFSYS_OPT_RX_SCALE */ extern __checkReturn efx_rc_t ef10_rx_prefix_pktlen( __in efx_nic_t *enp, __in uint8_t *buffer, __out uint16_t *lengthp); -extern void +extern void ef10_rx_qpost( - __in efx_rxq_t *erp, - __in_ecount(n) efsys_dma_addr_t *addrp, - __in size_t size, - __in unsigned int n, - __in unsigned int completed, - __in unsigned int added); + __in efx_rxq_t *erp, + __in_ecount(ndescs) efsys_dma_addr_t *addrp, + __in size_t size, + __in unsigned int ndescs, + __in unsigned int completed, + __in unsigned int added); extern void ef10_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp); extern __checkReturn efx_rc_t ef10_rx_qflush( __in efx_rxq_t *erp); extern void ef10_rx_qenable( __in efx_rxq_t *erp); extern __checkReturn efx_rc_t ef10_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, + __in uint32_t type_data, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, + __in unsigned int flags, __in efx_evq_t *eep, __in efx_rxq_t *erp); extern void ef10_rx_qdestroy( __in efx_rxq_t *erp); extern void ef10_rx_fini( __in efx_nic_t *enp); #if EFSYS_OPT_FILTER typedef struct ef10_filter_handle_s { uint32_t efh_lo; uint32_t efh_hi; } ef10_filter_handle_t; typedef struct ef10_filter_entry_s { uintptr_t efe_spec; /* pointer to filter spec plus busy bit */ ef10_filter_handle_t efe_handle; } ef10_filter_entry_t; /* * BUSY flag indicates that an update is in progress. * AUTO_OLD flag is used to mark and sweep MAC packet filters. */ #define EFX_EF10_FILTER_FLAG_BUSY 1U #define EFX_EF10_FILTER_FLAG_AUTO_OLD 2U #define EFX_EF10_FILTER_FLAGS 3U /* * Size of the hash table used by the driver. Doesn't need to be the * same size as the hardware's table. */ #define EFX_EF10_FILTER_TBL_ROWS 8192 /* Only need to allow for one directed and one unknown unicast filter */ #define EFX_EF10_FILTER_UNICAST_FILTERS_MAX 2 /* Allow for the broadcast address to be added to the multicast list */ #define EFX_EF10_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1) /* * For encapsulated packets, there is one filter each for each combination of * IPv4 or IPv6 outer frame, VXLAN, GENEVE or NVGRE packet type, and unicast or * multicast inner frames. 
*/ -#define EFX_EF10_FILTER_ENCAP_FILTERS_MAX 12 +#define EFX_EF10_FILTER_ENCAP_FILTERS_MAX 12 typedef struct ef10_filter_table_s { ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS]; efx_rxq_t *eft_default_rxq; boolean_t eft_using_rss; uint32_t eft_unicst_filter_indexes[ EFX_EF10_FILTER_UNICAST_FILTERS_MAX]; uint32_t eft_unicst_filter_count; uint32_t eft_mulcst_filter_indexes[ EFX_EF10_FILTER_MULTICAST_FILTERS_MAX]; uint32_t eft_mulcst_filter_count; boolean_t eft_using_all_mulcst; uint32_t eft_encap_filter_indexes[ EFX_EF10_FILTER_ENCAP_FILTERS_MAX]; uint32_t eft_encap_filter_count; } ef10_filter_table_t; __checkReturn efx_rc_t ef10_filter_init( __in efx_nic_t *enp); void ef10_filter_fini( __in efx_nic_t *enp); __checkReturn efx_rc_t ef10_filter_restore( __in efx_nic_t *enp); __checkReturn efx_rc_t ef10_filter_add( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec, __in boolean_t may_replace); __checkReturn efx_rc_t ef10_filter_delete( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec); extern __checkReturn efx_rc_t ef10_filter_supported_filters( __in efx_nic_t *enp, __out_ecount(buffer_length) uint32_t *buffer, __in size_t buffer_length, __out size_t *list_lengthp); extern __checkReturn efx_rc_t ef10_filter_reconfigure( __in efx_nic_t *enp, __in_ecount(6) uint8_t const *mac_addr, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in uint32_t count); extern void ef10_filter_get_default_rxq( __in efx_nic_t *enp, __out efx_rxq_t **erpp, __out boolean_t *using_rss); extern void ef10_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss); extern void ef10_filter_default_rxq_clear( __in efx_nic_t *enp); #endif /* EFSYS_OPT_FILTER */ extern __checkReturn efx_rc_t efx_mcdi_get_function_info( __in efx_nic_t *enp, __out uint32_t *pfp, __out_opt uint32_t *vfp); extern __checkReturn efx_rc_t efx_mcdi_privilege_mask( __in efx_nic_t *enp, __in uint32_t pf, __in uint32_t vf, __out uint32_t *maskp); extern __checkReturn efx_rc_t efx_mcdi_get_port_assignment( __in efx_nic_t *enp, __out uint32_t *portp); extern __checkReturn efx_rc_t efx_mcdi_get_port_modes( __in efx_nic_t *enp, __out uint32_t *modesp, __out_opt uint32_t *current_modep); extern __checkReturn efx_rc_t ef10_nic_get_port_mode_bandwidth( __in uint32_t port_mode, __out uint32_t *bandwidth_mbpsp); extern __checkReturn efx_rc_t efx_mcdi_get_mac_address_pf( __in efx_nic_t *enp, __out_ecount_opt(6) uint8_t mac_addrp[6]); extern __checkReturn efx_rc_t efx_mcdi_get_mac_address_vf( __in efx_nic_t *enp, __out_ecount_opt(6) uint8_t mac_addrp[6]); extern __checkReturn efx_rc_t efx_mcdi_get_clock( __in efx_nic_t *enp, __out uint32_t *sys_freqp, __out uint32_t *dpcpu_freqp); extern __checkReturn efx_rc_t efx_mcdi_get_vector_cfg( __in efx_nic_t *enp, __out_opt uint32_t *vec_basep, __out_opt uint32_t *pf_nvecp, __out_opt uint32_t *vf_nvecp); extern __checkReturn efx_rc_t ef10_get_datapath_caps( __in efx_nic_t *enp); extern __checkReturn efx_rc_t ef10_get_privilege_mask( __in efx_nic_t *enp, __out uint32_t *maskp); extern __checkReturn efx_rc_t ef10_external_port_mapping( __in efx_nic_t *enp, __in uint32_t port, __out uint8_t *external_portp); #if EFSYS_OPT_RX_PACKED_STREAM /* Data space per credit in packed stream mode */ #define EFX_RX_PACKED_STREAM_MEM_PER_CREDIT (1 << 16) /* * Received packets are always aligned at this boundary. Also there always * exists a gap of this size between packets. 
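 * For example, with the 8 byte pseudo-header prefix defined below, a
 * minimal packet slot occupies P2ROUNDUP(8 + EFX_MAC_PDU_MIN + 64, 64)
 * bytes of the packed buffer.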
* (see SF-112241-TC, 4.5) */ #define EFX_RX_PACKED_STREAM_ALIGNMENT 64 /* * Size of a pseudo-header prepended to received packets * in packed stream mode */ #define EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE 8 /* Minimum space for packet in packed stream mode */ #define EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE \ P2ROUNDUP(EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE + \ - EFX_MAC_PDU_MIN + \ - EFX_RX_PACKED_STREAM_ALIGNMENT, \ - EFX_RX_PACKED_STREAM_ALIGNMENT) + EFX_MAC_PDU_MIN + \ + EFX_RX_PACKED_STREAM_ALIGNMENT, \ + EFX_RX_PACKED_STREAM_ALIGNMENT) /* Maximum number of credits */ #define EFX_RX_PACKED_STREAM_MAX_CREDITS 127 #endif /* EFSYS_OPT_RX_PACKED_STREAM */ #ifdef __cplusplus } #endif #endif /* _SYS_EF10_IMPL_H */ Index: projects/clang700-import/sys/dev/sfxge/common/ef10_nvram.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/ef10_nvram.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/ef10_nvram.c (revision 340918) @@ -1,2389 +1,2392 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM #include "ef10_tlv_layout.h" /* Cursor for TLV partition format */ typedef struct tlv_cursor_s { uint32_t *block; /* Base of data block */ uint32_t *current; /* Cursor position */ uint32_t *end; /* End tag position */ uint32_t *limit; /* Last dword of data block */ } tlv_cursor_t; typedef struct nvram_partition_s { uint16_t type; uint8_t chip_select; uint8_t flags; /* * The full length of the NVRAM partition. * This is different from tlv_partition_header.total_length, * which can be smaller. 
*/ uint32_t length; uint32_t erase_size; uint32_t *data; tlv_cursor_t tlv_cursor; } nvram_partition_t; static __checkReturn efx_rc_t tlv_validate_state( __inout tlv_cursor_t *cursor); static void tlv_init_block( __out uint32_t *block) { *block = __CPU_TO_LE_32(TLV_TAG_END); } static uint32_t tlv_tag( __in tlv_cursor_t *cursor) { uint32_t dword, tag; dword = cursor->current[0]; tag = __LE_TO_CPU_32(dword); return (tag); } static size_t tlv_length( __in tlv_cursor_t *cursor) { uint32_t dword, length; if (tlv_tag(cursor) == TLV_TAG_END) return (0); dword = cursor->current[1]; length = __LE_TO_CPU_32(dword); return ((size_t)length); } static uint8_t * tlv_value( __in tlv_cursor_t *cursor) { if (tlv_tag(cursor) == TLV_TAG_END) return (NULL); return ((uint8_t *)(&cursor->current[2])); } static uint8_t * tlv_item( __in tlv_cursor_t *cursor) { if (tlv_tag(cursor) == TLV_TAG_END) return (NULL); return ((uint8_t *)cursor->current); } /* * TLV item DWORD length is tag + length + value (rounded up to DWORD) * equivalent to tlv_n_words_for_len in mc-comms tlv.c */ #define TLV_DWORD_COUNT(length) \ (1 + 1 + (((length) + sizeof (uint32_t) - 1) / sizeof (uint32_t))) static uint32_t * tlv_next_item_ptr( __in tlv_cursor_t *cursor) { uint32_t length; length = tlv_length(cursor); return (cursor->current + TLV_DWORD_COUNT(length)); } static __checkReturn efx_rc_t tlv_advance( __inout tlv_cursor_t *cursor) { efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (cursor->current == cursor->end) { /* No more tags after END tag */ cursor->current = NULL; rc = ENOENT; goto fail2; } /* Advance to next item and validate */ cursor->current = tlv_next_item_ptr(cursor); if ((rc = tlv_validate_state(cursor)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static efx_rc_t tlv_rewind( __in tlv_cursor_t *cursor) { efx_rc_t rc; cursor->current = cursor->block; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static efx_rc_t tlv_find( __inout tlv_cursor_t *cursor, __in uint32_t tag) { efx_rc_t rc; rc = tlv_rewind(cursor); while (rc == 0) { if (tlv_tag(cursor) == tag) break; rc = tlv_advance(cursor); } return (rc); } static __checkReturn efx_rc_t tlv_validate_state( __inout tlv_cursor_t *cursor) { efx_rc_t rc; /* Check cursor position */ if (cursor->current < cursor->block) { rc = EINVAL; goto fail1; } if (cursor->current > cursor->limit) { rc = EINVAL; goto fail2; } if (tlv_tag(cursor) != TLV_TAG_END) { /* Check current item has space for tag and length */ if (cursor->current > (cursor->limit - 2)) { cursor->current = NULL; rc = EFAULT; goto fail3; } /* Check we have value data for current item and another tag */ if (tlv_next_item_ptr(cursor) > (cursor->limit - 1)) { cursor->current = NULL; rc = EFAULT; goto fail4; } } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static efx_rc_t tlv_init_cursor( __out tlv_cursor_t *cursor, __in uint32_t *block, __in uint32_t *limit, __in uint32_t *current) { cursor->block = block; cursor->limit = limit; cursor->current = current; cursor->end = NULL; return (tlv_validate_state(cursor)); } static __checkReturn efx_rc_t tlv_init_cursor_from_size( __out tlv_cursor_t *cursor, __in_bcount(size) uint8_t *block, __in size_t size) { uint32_t *limit; limit = (uint32_t *)(block + size - sizeof (uint32_t)); return 
(tlv_init_cursor(cursor, (uint32_t *)block, limit, (uint32_t *)block)); } static __checkReturn efx_rc_t tlv_init_cursor_at_offset( __out tlv_cursor_t *cursor, __in_bcount(size) uint8_t *block, __in size_t size, __in size_t offset) { uint32_t *limit; uint32_t *current; limit = (uint32_t *)(block + size - sizeof (uint32_t)); current = (uint32_t *)(block + offset); return (tlv_init_cursor(cursor, (uint32_t *)block, limit, current)); } static __checkReturn efx_rc_t tlv_require_end( __inout tlv_cursor_t *cursor) { uint32_t *pos; efx_rc_t rc; if (cursor->end == NULL) { pos = cursor->current; if ((rc = tlv_find(cursor, TLV_TAG_END)) != 0) goto fail1; cursor->end = cursor->current; cursor->current = pos; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static size_t tlv_block_length_used( __inout tlv_cursor_t *cursor) { efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if ((rc = tlv_require_end(cursor)) != 0) goto fail2; /* Return space used (including the END tag) */ return (cursor->end + 1 - cursor->block) * sizeof (uint32_t); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (0); } static uint32_t * tlv_last_segment_end( __in tlv_cursor_t *cursor) { tlv_cursor_t segment_cursor; uint32_t *last_segment_end = cursor->block; uint32_t *segment_start = cursor->block; /* * Go through each segment and check that it has an end tag. If there * is no end tag then the previous segment was the last valid one, * so return the pointer to its end tag. */ for (;;) { if (tlv_init_cursor(&segment_cursor, segment_start, cursor->limit, segment_start) != 0) break; if (tlv_require_end(&segment_cursor) != 0) break; last_segment_end = segment_cursor.end; segment_start = segment_cursor.end + 1; } return (last_segment_end); } static uint32_t * tlv_write( __in tlv_cursor_t *cursor, __in uint32_t tag, __in_bcount(size) uint8_t *data, __in size_t size) { uint32_t len = size; uint32_t *ptr; ptr = cursor->current; *ptr++ = __CPU_TO_LE_32(tag); *ptr++ = __CPU_TO_LE_32(len); if (len > 0) { ptr[(len - 1) / sizeof (uint32_t)] = 0; memcpy(ptr, data, len); ptr += P2ROUNDUP(len, sizeof (uint32_t)) / sizeof (*ptr); } return (ptr); } static __checkReturn efx_rc_t tlv_insert( __inout tlv_cursor_t *cursor, __in uint32_t tag, __in_bcount(size) uint8_t *data, __in size_t size) { unsigned int delta; uint32_t *last_segment_end; efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if ((rc = tlv_require_end(cursor)) != 0) goto fail2; if (tag == TLV_TAG_END) { rc = EINVAL; goto fail3; } last_segment_end = tlv_last_segment_end(cursor); delta = TLV_DWORD_COUNT(size); if (last_segment_end + 1 + delta > cursor->limit) { rc = ENOSPC; goto fail4; } /* Move data up: new space at cursor->current */ memmove(cursor->current + delta, cursor->current, (last_segment_end + 1 - cursor->current) * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end += delta; /* Write new TLV item */ tlv_write(cursor, tag, data, size); return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t tlv_delete( __inout tlv_cursor_t *cursor) { unsigned int delta; uint32_t *last_segment_end; efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (tlv_tag(cursor) == TLV_TAG_END) { rc = EINVAL; goto fail2; } delta = TLV_DWORD_COUNT(tlv_length(cursor)); if ((rc = tlv_require_end(cursor)) != 0) goto fail3; last_segment_end = tlv_last_segment_end(cursor); /* 
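delta is the deleted item's full size in dwords, as computed by
 * TLV_DWORD_COUNT(): for example, a 5 byte value occupies
 * 1 (tag) + 1 (length) + 2 (padded value) = 4 dwords.
 *
 *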
Shuffle things down, destroying the item at cursor->current */ memmove(cursor->current, cursor->current + delta, (last_segment_end + 1 - cursor->current) * sizeof (uint32_t)); /* Zero the new space at the end of the TLV chain */ memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end -= delta; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t tlv_modify( __inout tlv_cursor_t *cursor, __in uint32_t tag, __in_bcount(size) uint8_t *data, __in size_t size) { uint32_t *pos; unsigned int old_ndwords; unsigned int new_ndwords; unsigned int delta; uint32_t *last_segment_end; efx_rc_t rc; if ((rc = tlv_validate_state(cursor)) != 0) goto fail1; if (tlv_tag(cursor) == TLV_TAG_END) { rc = EINVAL; goto fail2; } if (tlv_tag(cursor) != tag) { rc = EINVAL; goto fail3; } old_ndwords = TLV_DWORD_COUNT(tlv_length(cursor)); new_ndwords = TLV_DWORD_COUNT(size); if ((rc = tlv_require_end(cursor)) != 0) goto fail4; last_segment_end = tlv_last_segment_end(cursor); if (new_ndwords > old_ndwords) { /* Expand space used for TLV item */ delta = new_ndwords - old_ndwords; pos = cursor->current + old_ndwords; if (last_segment_end + 1 + delta > cursor->limit) { rc = ENOSPC; goto fail5; } /* Move up: new space at (cursor->current + old_ndwords) */ memmove(pos + delta, pos, (last_segment_end + 1 - pos) * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end += delta; } else if (new_ndwords < old_ndwords) { /* Shrink space used for TLV item */ delta = old_ndwords - new_ndwords; pos = cursor->current + new_ndwords; /* Move down: remove words at (cursor->current + new_ndwords) */ memmove(pos, pos + delta, (last_segment_end + 1 - pos) * sizeof (uint32_t)); /* Zero the new space at the end of the TLV chain */ memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t)); /* Adjust the end pointer */ cursor->end -= delta; } /* Write new data */ tlv_write(cursor, tag, data, size); return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static uint32_t checksum_tlv_partition( __in nvram_partition_t *partition) { tlv_cursor_t *cursor; uint32_t *ptr; uint32_t *end; uint32_t csum; size_t len; cursor = &partition->tlv_cursor; len = tlv_block_length_used(cursor); EFSYS_ASSERT3U((len & 3), ==, 0); csum = 0; ptr = partition->data; end = &ptr[len >> 2]; while (ptr < end) csum += __LE_TO_CPU_32(*ptr++); return (csum); } static __checkReturn efx_rc_t tlv_update_partition_len_and_cks( __in tlv_cursor_t *cursor) { efx_rc_t rc; nvram_partition_t partition; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; size_t new_len; /* * We just modified the partition, so the total length may not be * valid. Don't use tlv_find(), which performs some sanity checks * that may fail here. */ partition.data = cursor->block; memcpy(&partition.tlv_cursor, cursor, sizeof (*cursor)); header = (struct tlv_partition_header *)partition.data; /* Sanity check. */ if (__LE_TO_CPU_32(header->tag) != TLV_TAG_PARTITION_HEADER) { rc = EFAULT; goto fail1; } new_len = tlv_block_length_used(&partition.tlv_cursor); if (new_len == 0) { rc = EFAULT; goto fail2; } header->total_length = __CPU_TO_LE_32(new_len); /* Ensure the modified partition always has a new generation count. 
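 *
 * The generation is stored in both the header and the trailer, so a
 * reader can detect a concurrent update from a mismatch between the
 * two copies; ef10_nvram_read_tlv_segment() returns EAGAIN on such a
 * mismatch so that the caller retries.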
*/ header->generation = __CPU_TO_LE_32( __LE_TO_CPU_32(header->generation) + 1); trailer = (struct tlv_partition_trailer *)((uint8_t *)header + new_len - sizeof (*trailer) - sizeof (uint32_t)); trailer->generation = header->generation; trailer->checksum = __CPU_TO_LE_32( __LE_TO_CPU_32(trailer->checksum) - checksum_tlv_partition(&partition)); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Validate buffer contents (before writing to flash) */ __checkReturn efx_rc_t ef10_nvram_buffer_validate( __in efx_nic_t *enp, __in uint32_t partn, __in_bcount(partn_size) caddr_t partn_data, __in size_t partn_size) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; size_t total_length; uint32_t cksum; int pos; efx_rc_t rc; + _NOTE(ARGUNUSED(enp, partn)) EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK); if ((partn_data == NULL) || (partn_size == 0)) { rc = EINVAL; goto fail1; } /* The partition header must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)partn_data, partn_size)) != 0) { rc = EFAULT; goto fail2; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail3; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Check TLV partition length (includes the END tag) */ total_length = __LE_TO_CPU_32(header->total_length); if (total_length > partn_size) { rc = EFBIG; goto fail4; } /* Check partition ends with PARTITION_TRAILER and END tags */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail5; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail6; } if (tlv_tag(&cursor) != TLV_TAG_END) { rc = EINVAL; goto fail7; } /* Check generation counts are consistent */ if (trailer->generation != header->generation) { rc = EINVAL; goto fail8; } /* Verify partition checksum */ cksum = 0; for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(partn_data + pos)); } if (cksum != 0) { rc = EINVAL; goto fail9; } return (0); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_create( __in efx_nic_t *enp, __in uint16_t partn_type, __in_bcount(partn_size) caddr_t partn_data, __in size_t partn_size) { uint32_t *buf = (uint32_t *)partn_data; efx_rc_t rc; tlv_cursor_t cursor; struct tlv_partition_header header; struct tlv_partition_trailer trailer; unsigned int min_buf_size = sizeof (struct tlv_partition_header) + sizeof (struct tlv_partition_trailer); if (partn_size < min_buf_size) { rc = EINVAL; goto fail1; } memset(buf, 0xff, partn_size); tlv_init_block(buf); if ((rc = tlv_init_cursor(&cursor, buf, (uint32_t *)((uint8_t *)buf + partn_size), buf)) != 0) { goto fail2; } header.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_HEADER); header.length = __CPU_TO_LE_32(sizeof (header) - 8); header.type_id = __CPU_TO_LE_16(partn_type); header.preset = 0; header.generation = __CPU_TO_LE_32(1); header.total_length = 0; /* This will be fixed below. 
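 *
 * tlv_update_partition_len_and_cks() fixes it up once both items are
 * in place. Note that tlv_insert() writes the tag and length dwords
 * itself, which is why the payload below starts at &header.type_id
 * and is 8 bytes (two dwords) shorter than the structure.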
*/ if ((rc = tlv_insert( &cursor, TLV_TAG_PARTITION_HEADER, (uint8_t *)&header.type_id, sizeof (header) - 8)) != 0) goto fail3; if ((rc = tlv_advance(&cursor)) != 0) goto fail4; trailer.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_TRAILER); trailer.length = __CPU_TO_LE_32(sizeof (trailer) - 8); trailer.generation = header.generation; trailer.checksum = 0; /* This will be fixed below. */ if ((rc = tlv_insert(&cursor, TLV_TAG_PARTITION_TRAILER, (uint8_t *)&trailer.generation, sizeof (trailer) - 8)) != 0) goto fail5; if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0) goto fail6; /* Check that the partition is valid. */ if ((rc = ef10_nvram_buffer_validate(enp, partn_type, partn_data, partn_size)) != 0) goto fail7; return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static uint32_t byte_offset( __in uint32_t *position, __in uint32_t *base) { return (uint32_t)((uint8_t *)position - (uint8_t *)base); } __checkReturn efx_rc_t ef10_nvram_buffer_find_item_start( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __out uint32_t *startp) { /* Read past partition header to find start address of the first key */ tlv_cursor_t cursor; efx_rc_t rc; /* A PARTITION_HEADER tag must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail2; } if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail3; } *startp = byte_offset(cursor.current, cursor.block); if ((rc = tlv_require_end(&cursor)) != 0) goto fail4; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_find_end( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *endp) { /* Read to end of partition */ tlv_cursor_t cursor; efx_rc_t rc; uint32_t *segment_used; _NOTE(ARGUNUSED(offset)) if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } segment_used = cursor.block; /* * Go through each segment and check that it has an end tag. If there * is no end tag then the previous segment was the last valid one, * so return the used space including that end tag. */ while (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) { if (tlv_require_end(&cursor) != 0) { if (segment_used == cursor.block) { /* * First segment is corrupt, so there is * no valid data in partition. 
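 * Later segments are allowed to be corrupt; in that case the scan
 * stops and the space used up to the last valid END tag is
 * reported instead.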
*/ rc = EINVAL; goto fail2; } break; } segment_used = cursor.end + 1; cursor.current = segment_used; } /* Return space used (including the END tag) */ *endp = (segment_used - cursor.block) * sizeof (uint32_t); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn __success(return != B_FALSE) boolean_t ef10_nvram_buffer_find_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, __out uint32_t *lengthp) { /* Find TLV at offset and return key start and length */ tlv_cursor_t cursor; uint8_t *key; uint32_t tag; if (tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset) != 0) { return (B_FALSE); } while ((key = tlv_item(&cursor)) != NULL) { tag = tlv_tag(&cursor); if (tag == TLV_TAG_PARTITION_HEADER || tag == TLV_TAG_PARTITION_TRAILER) { if (tlv_advance(&cursor) != 0) { break; } continue; } *startp = byte_offset(cursor.current, cursor.block); *lengthp = byte_offset(tlv_next_item_ptr(&cursor), cursor.current); return (B_TRUE); } return (B_FALSE); } __checkReturn efx_rc_t ef10_nvram_buffer_get_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(item_max_size, *lengthp) caddr_t itemp, __in size_t item_max_size, __out uint32_t *lengthp) { efx_rc_t rc; tlv_cursor_t cursor; uint32_t item_length; if (item_max_size < length) { rc = ENOSPC; goto fail1; } if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset)) != 0) { goto fail2; } item_length = tlv_length(&cursor); if (length < item_length) { rc = ENOSPC; goto fail3; } memcpy(itemp, tlv_value(&cursor), item_length); *lengthp = item_length; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_insert_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, __out uint32_t *lengthp) { efx_rc_t rc; tlv_cursor_t cursor; if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset)) != 0) { goto fail1; } rc = tlv_insert(&cursor, TLV_TAG_LICENSE, (uint8_t *)keyp, length); if (rc != 0) { goto fail2; } *lengthp = byte_offset(tlv_next_item_ptr(&cursor), cursor.current); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_delete_item( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end) { efx_rc_t rc; tlv_cursor_t cursor; _NOTE(ARGUNUSED(length, end)) if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp, buffer_size, offset)) != 0) { goto fail1; } if ((rc = tlv_delete(&cursor)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_buffer_finish( __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size) { efx_rc_t rc; tlv_cursor_t cursor; if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } if ((rc = tlv_require_end(&cursor)) != 0) goto fail2; if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Read and validate a 
segment from a partition. A segment is a complete * tlv chain between PARTITION_HEADER and PARTITION_END tags. There may * be multiple segments in a partition, so seg_offset allows segments * beyond the first to be read. */ static __checkReturn efx_rc_t ef10_nvram_read_tlv_segment( __in efx_nic_t *enp, __in uint32_t partn, __in size_t seg_offset, __in_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; size_t total_length; uint32_t cksum; int pos; efx_rc_t rc; EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK); if ((seg_data == NULL) || (max_seg_size == 0)) { rc = EINVAL; goto fail1; } /* Read initial chunk of the segment, starting at offset */ if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset, seg_data, EF10_NVRAM_CHUNK, MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) { goto fail2; } /* A PARTITION_HEADER tag must be the first item at the given offset */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail3; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail4; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Check TLV segment length (includes the END tag) */ total_length = __LE_TO_CPU_32(header->total_length); if (total_length > max_seg_size) { rc = EFBIG; goto fail5; } /* Read the remaining segment content */ if (total_length > EF10_NVRAM_CHUNK) { if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset + EF10_NVRAM_CHUNK, seg_data + EF10_NVRAM_CHUNK, total_length - EF10_NVRAM_CHUNK, MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) goto fail6; } /* Check segment ends with PARTITION_TRAILER and END tags */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail7; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail8; } if (tlv_tag(&cursor) != TLV_TAG_END) { rc = EINVAL; goto fail9; } /* Check data read from segment is consistent */ if (trailer->generation != header->generation) { /* * The partition data may have been modified between successive * MCDI NVRAM_READ requests by the MC or another PCI function. * * The caller must retry to obtain consistent partition data. */ rc = EAGAIN; goto fail10; } /* Verify segment checksum */ cksum = 0; for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(seg_data + pos)); } if (cksum != 0) { rc = EINVAL; goto fail11; } return (0); fail11: EFSYS_PROBE(fail11); fail10: EFSYS_PROBE(fail10); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Read a single TLV item from a host memory * buffer containing a TLV formatted segment. 
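 *
 * The value is returned in a buffer obtained with EFSYS_KMEM_ALLOC(),
 * so on success the caller owns the memory and releases it with
 * EFSYS_KMEM_FREE(). A minimal usage sketch (illustrative only; the
 * caller-side variable names are hypothetical):
 *
 *	caddr_t data;
 *	size_t size;
 *
 *	if (ef10_nvram_buf_read_tlv(enp, seg, seg_size, tag,
 *	    &data, &size) == 0) {
 *		if (data != NULL) {
 *			use(data, size);
 *			EFSYS_KMEM_FREE(enp->en_esip, size, data);
 *		}
 *	}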
*/ __checkReturn efx_rc_t ef10_nvram_buf_read_tlv( __in efx_nic_t *enp, __in_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __deref_out_bcount_opt(*sizep) caddr_t *datap, __out size_t *sizep) { tlv_cursor_t cursor; caddr_t data; size_t length; caddr_t value; efx_rc_t rc; + + _NOTE(ARGUNUSED(enp)) if ((seg_data == NULL) || (max_seg_size == 0)) { rc = EINVAL; goto fail1; } /* Find requested TLV tag in segment data */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail2; } if ((rc = tlv_find(&cursor, tag)) != 0) { rc = ENOENT; goto fail3; } value = (caddr_t)tlv_value(&cursor); length = tlv_length(&cursor); if (length == 0) data = NULL; else { /* Copy out data from TLV item */ EFSYS_KMEM_ALLOC(enp->en_esip, length, data); if (data == NULL) { rc = ENOMEM; goto fail4; } memcpy(data, value, length); } *datap = data; *sizep = length; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Read a single TLV item from the first segment in a TLV formatted partition */ __checkReturn efx_rc_t ef10_nvram_partn_read_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __deref_out_bcount_opt(*seg_sizep) caddr_t *seg_datap, __out size_t *seg_sizep) { caddr_t seg_data = NULL; size_t partn_size = 0; size_t length; caddr_t data; int retry; efx_rc_t rc; /* Allocate sufficient memory for the entire partition */ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0) goto fail1; if (partn_size == 0) { rc = ENOENT; goto fail2; } EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, seg_data); if (seg_data == NULL) { rc = ENOMEM; goto fail3; } /* * Read the first segment in a TLV partition. Retry until consistent * segment contents are returned. Inconsistent data may be read if: * a) the segment contents are invalid * b) the MC has rebooted while we were reading the partition * c) the partition has been modified while we were reading it * Limit retry attempts to ensure forward progress. */ retry = 10; do { rc = ef10_nvram_read_tlv_segment(enp, partn, 0, seg_data, partn_size); } while ((rc == EAGAIN) && (--retry > 0)); if (rc != 0) { /* Failed to obtain consistent segment data */ goto fail4; } if ((rc = ef10_nvram_buf_read_tlv(enp, seg_data, partn_size, tag, &data, &length)) != 0) goto fail5; EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data); *seg_datap = data; *seg_sizep = length; return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* Compute the size of a segment. 
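 *
 * The size reported is total_length taken from the segment header; it
 * is only trusted after the trailer and END tag positions have been
 * cross-checked and the segment's dwords verified to sum to zero.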
*/ static __checkReturn efx_rc_t ef10_nvram_buf_segment_size( __in caddr_t seg_data, __in size_t max_seg_size, __out size_t *seg_sizep) { efx_rc_t rc; tlv_cursor_t cursor; struct tlv_partition_header *header; uint32_t cksum; int pos; uint32_t *end_tag_position; uint32_t segment_length; /* A PARTITION_HEADER tag must be the first item at the given offset */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail1; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail2; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Check TLV segment length (includes the END tag) */ *seg_sizep = __LE_TO_CPU_32(header->total_length); if (*seg_sizep > max_seg_size) { rc = EFBIG; goto fail3; } /* Check segment ends with PARTITION_TRAILER and END tags */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail4; } if ((rc = tlv_advance(&cursor)) != 0) { rc = EINVAL; goto fail5; } if (tlv_tag(&cursor) != TLV_TAG_END) { rc = EINVAL; goto fail6; } end_tag_position = cursor.current; /* Verify segment checksum */ cksum = 0; for (pos = 0; (size_t)pos < *seg_sizep; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(seg_data + pos)); } if (cksum != 0) { rc = EINVAL; goto fail7; } /* * Calculate total length from HEADER to END tags and compare to * max_seg_size and the total_length field in the HEADER tag. */ segment_length = tlv_block_length_used(&cursor); if (segment_length > max_seg_size) { rc = EINVAL; goto fail8; } if (segment_length != *seg_sizep) { rc = EINVAL; goto fail9; } /* Skip over the first HEADER tag. */ rc = tlv_rewind(&cursor); rc = tlv_advance(&cursor); while (rc == 0) { if (tlv_tag(&cursor) == TLV_TAG_END) { /* Check that the END tag is the one found earlier. */ if (cursor.current != end_tag_position) goto fail10; break; } /* Check for duplicate HEADER tags before the END tag. */ if (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail11; } rc = tlv_advance(&cursor); } if (rc != 0) goto fail12; return (0); fail12: EFSYS_PROBE(fail12); fail11: EFSYS_PROBE(fail11); fail10: EFSYS_PROBE(fail10); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Add or update a single TLV item in a host memory buffer containing a TLV * formatted segment. Historically partitions consisted of only one segment. 
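 *
 * The trailer checksum is chosen so that all of the segment's dwords
 * sum to zero: with the checksum field cleared the remaining dwords
 * sum to cksum, so storing ~cksum + 1 (the two's complement, i.e.
 * -cksum) makes the verification loops elsewhere read back zero.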
*/ __checkReturn efx_rc_t ef10_nvram_buf_write_tlv( __inout_bcount(max_seg_size) caddr_t seg_data, __in size_t max_seg_size, __in uint32_t tag, __in_bcount(tag_size) caddr_t tag_data, __in size_t tag_size, __out size_t *total_lengthp) { tlv_cursor_t cursor; struct tlv_partition_header *header; struct tlv_partition_trailer *trailer; uint32_t generation; uint32_t cksum; int pos; efx_rc_t rc; /* A PARTITION_HEADER tag must be the first item (at offset zero) */ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data, max_seg_size)) != 0) { rc = EFAULT; goto fail1; } if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) { rc = EINVAL; goto fail2; } header = (struct tlv_partition_header *)tlv_item(&cursor); /* Update the TLV chain to contain the new data */ if ((rc = tlv_find(&cursor, tag)) == 0) { /* Modify existing TLV item */ if ((rc = tlv_modify(&cursor, tag, (uint8_t *)tag_data, tag_size)) != 0) goto fail3; } else { /* Insert a new TLV item before the PARTITION_TRAILER */ rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER); if (rc != 0) { rc = EINVAL; goto fail4; } if ((rc = tlv_insert(&cursor, tag, (uint8_t *)tag_data, tag_size)) != 0) { rc = EINVAL; goto fail5; } } /* Find the trailer tag */ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) { rc = EINVAL; goto fail6; } trailer = (struct tlv_partition_trailer *)tlv_item(&cursor); /* Update PARTITION_HEADER and PARTITION_TRAILER fields */ *total_lengthp = tlv_block_length_used(&cursor); if (*total_lengthp > max_seg_size) { rc = ENOSPC; goto fail7; } generation = __LE_TO_CPU_32(header->generation) + 1; header->total_length = __CPU_TO_LE_32(*total_lengthp); header->generation = __CPU_TO_LE_32(generation); trailer->generation = __CPU_TO_LE_32(generation); /* Recompute PARTITION_TRAILER checksum */ trailer->checksum = 0; cksum = 0; for (pos = 0; (size_t)pos < *total_lengthp; pos += sizeof (uint32_t)) { cksum += *((uint32_t *)(seg_data + pos)); } trailer->checksum = ~cksum + 1; return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Add or update a single TLV item in the first segment of a TLV formatted * dynamic config partition. The first segment is the current active * configuration. */ __checkReturn efx_rc_t ef10_nvram_partn_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size) { return ef10_nvram_partn_write_segment_tlv(enp, partn, tag, data, size, B_FALSE); } /* * Read a segment from nvram at the given offset into a buffer (segment_data) * and optionally write a new tag to it. */ static __checkReturn efx_rc_t ef10_nvram_segment_write_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size, __inout caddr_t *seg_datap, __inout size_t *partn_offsetp, __inout size_t *src_remain_lenp, __inout size_t *dest_remain_lenp, __in boolean_t write) { efx_rc_t rc; efx_rc_t status; size_t original_segment_size; size_t modified_segment_size; /* * Read the segment from NVRAM into the segment_data buffer and validate * it, returning if it does not validate. This is not a failure unless * this is the first segment in a partition. In this case the caller * must propagate the error. 
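 *
 * On success the iteration state is advanced past this segment:
 * *partn_offsetp and *src_remain_lenp by the original on-flash size,
 * and *seg_datap and *dest_remain_lenp by the modified size when
 * writing (or the original size when not), ready for the next segment.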
 */ status = ef10_nvram_read_tlv_segment(enp, partn, *partn_offsetp, *seg_datap, *src_remain_lenp); if (status != 0) { rc = EINVAL; goto fail1; } status = ef10_nvram_buf_segment_size(*seg_datap, *src_remain_lenp, &original_segment_size); if (status != 0) { rc = EINVAL; goto fail2; } if (write) { /* Update the contents of the segment in the buffer */ if ((rc = ef10_nvram_buf_write_tlv(*seg_datap, *dest_remain_lenp, tag, data, size, &modified_segment_size)) != 0) { goto fail3; } *dest_remain_lenp -= modified_segment_size; *seg_datap += modified_segment_size; } else { /* * We won't modify this segment, but still need to update the * remaining lengths and pointers. */ *dest_remain_lenp -= original_segment_size; *seg_datap += original_segment_size; } *partn_offsetp += original_segment_size; *src_remain_lenp -= original_segment_size; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Add or update a single TLV item in either the first segment or in all * segments in a TLV formatted dynamic config partition. Dynamic config * partitions on boards that support RFID are divided into a number of segments, * each formatted like a partition, with header, trailer and end tags. The first * segment is the current active configuration. * * The segments are initialised by manftest and each contain a different * configuration e.g. firmware variant. The firmware can be instructed * via RFID to copy a segment to replace the first segment, hence changing the * active configuration. This allows ops to change the configuration of a board * prior to shipment using RFID. * * Changes to the dynamic config may need to be written to all segments (e.g. * firmware versions) or just the first segment (changes to the active * configuration). See SF-111324-SW "The use of RFID in Solarflare Products". * If only the first segment is written the code still needs to be aware of the * possible presence of subsequent segments as writing to a segment may cause * its size to increase, which would overwrite the subsequent segments and * invalidate them. */ __checkReturn efx_rc_t ef10_nvram_partn_write_segment_tlv( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t tag, __in_bcount(size) caddr_t data, __in size_t size, __in boolean_t all_segments) { size_t partn_size = 0; caddr_t partn_data; size_t total_length = 0; efx_rc_t rc; size_t current_offset = 0; size_t remaining_original_length; size_t remaining_modified_length; caddr_t segment_data; EFSYS_ASSERT3U(partn, ==, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG); /* Allocate sufficient memory for the entire partition */ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0) goto fail1; EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, partn_data); if (partn_data == NULL) { rc = ENOMEM; goto fail2; } remaining_original_length = partn_size; remaining_modified_length = partn_size; segment_data = partn_data; /* Lock the partition */ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0) goto fail3; /* Iterate over each (potential) segment to update it. */ do { boolean_t write = all_segments || current_offset == 0; rc = ef10_nvram_segment_write_tlv(enp, partn, tag, data, size, &segment_data, &current_offset, &remaining_original_length, &remaining_modified_length, write); if (rc != 0) { if (current_offset == 0) { /* * If no data has been read then the first * segment is invalid, which is an error.
*/ goto fail4; } break; } } while (current_offset < partn_size); total_length = segment_data - partn_data; /* * We've run out of space. This should actually be dealt with by * ef10_nvram_buf_write_tlv returning ENOSPC. */ if (total_length > partn_size) { rc = ENOSPC; goto fail5; } /* Erase the whole partition in NVRAM */ if ((rc = ef10_nvram_partn_erase(enp, partn, 0, partn_size)) != 0) goto fail6; /* Write new partition contents from the buffer to NVRAM */ if ((rc = ef10_nvram_partn_write(enp, partn, 0, partn_data, total_length)) != 0) goto fail7; /* Unlock the partition */ ef10_nvram_partn_unlock(enp, partn, NULL); EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data); return (0); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); ef10_nvram_partn_unlock(enp, partn, NULL); fail3: EFSYS_PROBE(fail3); EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Get the size of a NVRAM partition. This is the total size allocated in nvram, * not the data used by the segments in the partition. */ __checkReturn efx_rc_t ef10_nvram_partn_size( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *sizep) { efx_rc_t rc; if ((rc = efx_mcdi_nvram_info(enp, partn, sizep, NULL, NULL, NULL)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_lock( __in efx_nic_t *enp, __in uint32_t partn) { efx_rc_t rc; if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_read_mode( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size, __in uint32_t mode) { size_t chunk; efx_rc_t rc; while (size > 0) { chunk = MIN(size, EF10_NVRAM_CHUNK); if ((rc = efx_mcdi_nvram_read(enp, partn, offset, data, chunk, mode)) != 0) { goto fail1; } size -= chunk; data += chunk; offset += chunk; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_read( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { /* * An A/B partition has two data stores (current and backup). * Read requests which come in through the EFX API expect to read the * current, active store of an A/B partition. For non A/B partitions, * there is only a single store and so the mode param is ignored. */ return ef10_nvram_partn_read_mode(enp, partn, offset, data, size, MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT); } __checkReturn efx_rc_t ef10_nvram_partn_read_backup( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { /* * An A/B partition has two data stores (current and backup). * Read the backup store of an A/B partition (i.e. the store currently * being written to if the partition is locked). * * This is needed when comparing the existing partition content to avoid * unnecessary writes, or to read back what has been written to check * that the writes have succeeded. 
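 *
 * A verify-after-write sequence built on this (a sketch, not code
 * taken from this driver) would: lock the partition, write the new
 * image, read it back from the backup store with this function, and
 * memcmp() the result against the source buffer before unlocking.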
*/ return ef10_nvram_partn_read_mode(enp, partn, offset, data, size, MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP); } __checkReturn efx_rc_t ef10_nvram_partn_erase( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __in size_t size) { efx_rc_t rc; uint32_t erase_size; if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL, &erase_size, NULL)) != 0) goto fail1; if (erase_size == 0) { if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0) goto fail2; } else { if (size % erase_size != 0) { rc = EINVAL; goto fail3; } while (size > 0) { if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, erase_size)) != 0) goto fail4; offset += erase_size; size -= erase_size; } } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_write( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size) { size_t chunk; uint32_t write_size; efx_rc_t rc; if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL, NULL, &write_size)) != 0) goto fail1; if (write_size != 0) { /* * Check that the size is a multiple of the write chunk size if * the write chunk size is available. */ if (size % write_size != 0) { rc = EINVAL; goto fail2; } } else { write_size = EF10_NVRAM_CHUNK; } while (size > 0) { chunk = MIN(size, write_size); if ((rc = efx_mcdi_nvram_write(enp, partn, offset, data, chunk)) != 0) { goto fail3; } size -= chunk; data += chunk; offset += chunk; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_unlock( __in efx_nic_t *enp, __in uint32_t partn, __out_opt uint32_t *verify_resultp) { boolean_t reboot = B_FALSE; efx_rc_t rc; if (verify_resultp != NULL) *verify_resultp = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN; rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, verify_resultp); if (rc != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_set_version( __in efx_nic_t *enp, __in uint32_t partn, __in_ecount(4) uint16_t version[4]) { struct tlv_partition_version partn_version; size_t size; efx_rc_t rc; /* Add or modify partition version TLV item */ partn_version.version_w = __CPU_TO_LE_16(version[0]); partn_version.version_x = __CPU_TO_LE_16(version[1]); partn_version.version_y = __CPU_TO_LE_16(version[2]); partn_version.version_z = __CPU_TO_LE_16(version[3]); size = sizeof (partn_version) - (2 * sizeof (uint32_t)); /* Write the version number to all segments in the partition */ if ((rc = ef10_nvram_partn_write_segment_tlv(enp, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, TLV_TAG_PARTITION_VERSION(partn), (caddr_t)&partn_version.version_w, size, B_TRUE)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ #if EFSYS_OPT_NVRAM typedef struct ef10_parttbl_entry_s { unsigned int partn; unsigned int port_mask; efx_nvram_type_t nvtype; } ef10_parttbl_entry_t; /* Port mask values */ #define PORT_1 (1u << 1) #define PORT_2 (1u << 2) #define PORT_3 (1u << 3) #define PORT_4 (1u << 4) #define PORT_ALL (0xffffffffu) #define PARTN_MAP_ENTRY(partn, port_mask, nvtype) \ { (NVRAM_PARTITION_TYPE_##partn), (PORT_##port_mask), (EFX_NVRAM_##nvtype) } /* Translate EFX NVRAM types to firmware partition types */ static ef10_parttbl_entry_t hunt_parttbl[] = { /* partn ports nvtype */ 
PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE), PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN), PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM), PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT0, 1, BOOTROM_CFG), PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT1, 2, BOOTROM_CFG), PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT2, 3, BOOTROM_CFG), PARTN_MAP_ENTRY(EXPROM_CONFIG_PORT3, 4, BOOTROM_CFG), PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG), PARTN_MAP_ENTRY(FPGA, ALL, FPGA), PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP), PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE), }; static ef10_parttbl_entry_t medford_parttbl[] = { /* partn ports nvtype */ PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE), PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN), PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM), PARTN_MAP_ENTRY(EXPROM_CONFIG, ALL, BOOTROM_CFG), PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG), PARTN_MAP_ENTRY(FPGA, ALL, FPGA), PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP), PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE), PARTN_MAP_ENTRY(EXPANSION_UEFI, ALL, UEFIROM), PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE), }; static __checkReturn efx_rc_t ef10_parttbl_get( __in efx_nic_t *enp, __out ef10_parttbl_entry_t **parttblp, __out size_t *parttbl_rowsp) { switch (enp->en_family) { case EFX_FAMILY_HUNTINGTON: *parttblp = hunt_parttbl; *parttbl_rowsp = EFX_ARRAY_SIZE(hunt_parttbl); break; case EFX_FAMILY_MEDFORD: *parttblp = medford_parttbl; *parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl); break; default: EFSYS_ASSERT(B_FALSE); return (EINVAL); } return (0); } __checkReturn efx_rc_t ef10_nvram_type_to_partn( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *partnp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); ef10_parttbl_entry_t *parttbl = NULL; size_t parttbl_rows = 0; unsigned int i; EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID); EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES); EFSYS_ASSERT(partnp != NULL); if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) { for (i = 0; i < parttbl_rows; i++) { ef10_parttbl_entry_t *entry = &parttbl[i]; if ((entry->nvtype == type) && (entry->port_mask & (1u << emip->emi_port))) { *partnp = entry->partn; return (0); } } } return (ENOTSUP); } #if EFSYS_OPT_DIAG static __checkReturn efx_rc_t ef10_nvram_partn_to_type( __in efx_nic_t *enp, __in uint32_t partn, __out efx_nvram_type_t *typep) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); ef10_parttbl_entry_t *parttbl = NULL; size_t parttbl_rows = 0; unsigned int i; EFSYS_ASSERT(typep != NULL); if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) { for (i = 0; i < parttbl_rows; i++) { ef10_parttbl_entry_t *entry = &parttbl[i]; if ((entry->partn == partn) && (entry->port_mask & (1u << emip->emi_port))) { *typep = entry->nvtype; return (0); } } } return (ENOTSUP); } __checkReturn efx_rc_t ef10_nvram_test( __in efx_nic_t *enp) { efx_nvram_type_t type; unsigned int npartns = 0; uint32_t *partns = NULL; size_t size; unsigned int i; efx_rc_t rc; /* Read available partitions from NVRAM partition map */ size = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM * sizeof (uint32_t); EFSYS_KMEM_ALLOC(enp->en_esip, size, partns); if (partns == NULL) { rc = ENOMEM; goto fail1; } if ((rc = efx_mcdi_nvram_partitions(enp, (caddr_t)partns, size, &npartns)) != 0) { goto fail2; } for (i = 0; i < npartns; i++) { /* Check if the partition is supported for this port */ if ((rc = ef10_nvram_partn_to_type(enp, partns[i], &type)) != 0) continue; if ((rc = efx_mcdi_nvram_test(enp, partns[i])) != 0) goto fail3; } EFSYS_KMEM_FREE(enp->en_esip, size, partns); return 
(0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, size, partns); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ __checkReturn efx_rc_t ef10_nvram_partn_get_version( __in efx_nic_t *enp, __in uint32_t partn, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]) { efx_rc_t rc; /* FIXME: get highest partn version from all ports */ /* FIXME: return partn description if available */ if ((rc = efx_mcdi_nvram_metadata(enp, partn, subtypep, version, NULL, 0)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_rw_start( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *chunk_sizep) { uint32_t write_size = 0; efx_rc_t rc; if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL, NULL, &write_size)) != 0) goto fail1; if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0) goto fail2; if (chunk_sizep != NULL) { if (write_size == 0) *chunk_sizep = EF10_NVRAM_CHUNK; else *chunk_sizep = write_size; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_nvram_partn_rw_finish( __in efx_nic_t *enp, __in uint32_t partn, __out_opt uint32_t *verify_resultp) { efx_rc_t rc; if ((rc = ef10_nvram_partn_unlock(enp, partn, verify_resultp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_NVRAM */ #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: projects/clang700-import/sys/dev/sfxge/common/ef10_phy.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/ef10_phy.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/ef10_phy.c (revision 340918) @@ -1,634 +1,636 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD static void mcdi_phy_decode_cap( __in uint32_t mcdi_cap, __out uint32_t *maskp) { uint32_t mask; mask = 0; if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) mask |= (1 << EFX_PHY_CAP_10HDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) mask |= (1 << EFX_PHY_CAP_10FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) mask |= (1 << EFX_PHY_CAP_100HDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) mask |= (1 << EFX_PHY_CAP_100FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) mask |= (1 << EFX_PHY_CAP_1000HDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_1000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_10000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_40000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) mask |= (1 << EFX_PHY_CAP_PAUSE); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) mask |= (1 << EFX_PHY_CAP_ASYM); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) mask |= (1 << EFX_PHY_CAP_AN); *maskp = mask; } static void mcdi_phy_decode_link_mode( __in efx_nic_t *enp, __in uint32_t link_flags, __in unsigned int speed, __in unsigned int fcntl, __out efx_link_mode_t *link_modep, __out unsigned int *fcntlp) { boolean_t fd = !!(link_flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN)); boolean_t up = !!(link_flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN)); _NOTE(ARGUNUSED(enp)) if (!up) *link_modep = EFX_LINK_DOWN; else if (speed == 40000 && fd) *link_modep = EFX_LINK_40000FDX; else if (speed == 10000 && fd) *link_modep = EFX_LINK_10000FDX; else if (speed == 1000) *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX; else if (speed == 100) *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX; else if (speed == 10) *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX; else *link_modep = EFX_LINK_UNKNOWN; if (fcntl == MC_CMD_FCNTL_OFF) *fcntlp = 0; else if (fcntl == MC_CMD_FCNTL_RESPOND) *fcntlp = EFX_FCNTL_RESPOND; else if (fcntl == MC_CMD_FCNTL_GENERATE) *fcntlp = EFX_FCNTL_GENERATE; else if (fcntl == MC_CMD_FCNTL_BIDIR) *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; else { EFSYS_PROBE1(mc_pcol_error, int, fcntl); *fcntlp = 0; } } void ef10_phy_link_ev( __in efx_nic_t *enp, __in efx_qword_t *eqp, __out efx_link_mode_t *link_modep) { efx_port_t *epp = &(enp->en_port); unsigned int link_flags; unsigned int speed; unsigned int fcntl; efx_link_mode_t link_mode; uint32_t lp_cap_mask; /* * Convert the LINKCHANGE speed enumeration into mbit/s, in the * same way as GET_LINK encodes the speed */ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) { case MCDI_EVENT_LINKCHANGE_SPEED_100M: speed = 100; break; case MCDI_EVENT_LINKCHANGE_SPEED_1G: speed = 1000; break; case MCDI_EVENT_LINKCHANGE_SPEED_10G: speed = 10000; break; case MCDI_EVENT_LINKCHANGE_SPEED_40G: speed = 40000; break; default: speed = 0; break; } link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS); mcdi_phy_decode_link_mode(enp, link_flags, speed, MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL), &link_mode, &fcntl); mcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP), &lp_cap_mask); /* * It's safe to update ep_lp_cap_mask without the driver's port lock * because presumably any concurrently running efx_port_poll() is * only going to arrive at the same value. * * ep_fcntl has two meanings. It's either the link common fcntl * (if the PHY supports AN), or it's the forced link state. 
If * the former, it's safe to update the value for the same reason as * for ep_lp_cap_mask. If the latter, then just ignore the value, * because we can race with efx_mac_fcntl_set(). */ epp->ep_lp_cap_mask = lp_cap_mask; epp->ep_fcntl = fcntl; *link_modep = link_mode; } __checkReturn efx_rc_t ef10_phy_power( __in efx_nic_t *enp, __in boolean_t power) { efx_rc_t rc; if (!power) return (0); /* Check if the PHY is a zombie */ if ((rc = ef10_phy_verify(enp)) != 0) goto fail1; enp->en_reset_flags |= EFX_RESET_PHY; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_phy_get_link( __in efx_nic_t *enp, __out ef10_link_state_t *elsp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN, MC_CMD_GET_LINK_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_LINK; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_LINK_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) { rc = EMSGSIZE; goto fail2; } mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP), &elsp->els_adv_cap_mask); mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP), &elsp->els_lp_cap_mask); mcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS), MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED), MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL), &elsp->els_link_mode, &elsp->els_fcntl); #if EFSYS_OPT_LOOPBACK /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD); elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE); #endif /* EFSYS_OPT_LOOPBACK */ elsp->els_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_phy_reconfigure( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN, MC_CMD_SET_LINK_OUT_LEN)]; uint32_t cap_mask; +#if EFSYS_OPT_PHY_LED_CONTROL unsigned int led_mode; +#endif unsigned int speed; boolean_t supported; efx_rc_t rc; if ((rc = efx_mcdi_link_control_supported(enp, &supported)) != 0) goto fail1; if (supported == B_FALSE) goto out; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd 
= MC_CMD_SET_LINK; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_LINK_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN; cap_mask = epp->ep_adv_cap_mask; MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP, PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1, PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1, PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1, PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1, PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1, PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1, PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1, PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1, PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1, PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1); /* Too many fields for POPULATE macros, so insert this afterwards */ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1); #if EFSYS_OPT_LOOPBACK MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, epp->ep_loopback_type); switch (epp->ep_loopback_link_mode) { case EFX_LINK_100FDX: speed = 100; break; case EFX_LINK_1000FDX: speed = 1000; break; case EFX_LINK_10000FDX: speed = 10000; break; case EFX_LINK_40000FDX: speed = 40000; break; default: speed = 0; } #else MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE); speed = 0; #endif /* EFSYS_OPT_LOOPBACK */ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed); #if EFSYS_OPT_PHY_FLAGS MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags); #else MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0); #endif /* EFSYS_OPT_PHY_FLAGS */ efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } /* And set the blink mode */ (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_ID_LED; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN; #if EFSYS_OPT_PHY_LED_CONTROL switch (epp->ep_phy_led_mode) { case EFX_PHY_LED_DEFAULT: led_mode = MC_CMD_LED_DEFAULT; break; case EFX_PHY_LED_OFF: led_mode = MC_CMD_LED_OFF; break; case EFX_PHY_LED_ON: led_mode = MC_CMD_LED_ON; break; default: EFSYS_ASSERT(0); led_mode = MC_CMD_LED_DEFAULT; } MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode); #else MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT); #endif /* EFSYS_OPT_PHY_LED_CONTROL */ efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } out: return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_phy_verify( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN, MC_CMD_GET_PHY_STATE_OUT_LEN)]; uint32_t state; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PHY_STATE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) { rc = EMSGSIZE; goto fail2; } state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE); if (state != MC_CMD_PHY_STATE_OK) { if (state != MC_CMD_PHY_STATE_ZOMBIE) EFSYS_PROBE1(mc_pcol_error, int, state); rc = ENOTACTIVE; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip) { _NOTE(ARGUNUSED(enp, ouip)) return (ENOTSUP); } #if EFSYS_OPT_PHY_STATS __checkReturn efx_rc_t ef10_phy_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat) { /* TBD: no stats support in firmware yet */ _NOTE(ARGUNUSED(enp, esmp)) memset(stat, 0, EFX_PHY_NSTATS * sizeof (*stat)); return (0); } #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_BIST __checkReturn efx_rc_t ef10_bist_enable_offline( __in efx_nic_t *enp) { efx_rc_t rc; if ((rc = efx_mcdi_bist_enable_offline(enp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type) { efx_rc_t rc; if ((rc = efx_mcdi_bist_start(enp, type)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t *resultp, __out_opt __drv_when(count > 0, __notnull) uint32_t *value_maskp, __out_ecount_opt(count) __drv_when(count > 0, __notnull) unsigned long *valuesp, __in size_t count) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN, MCDI_CTL_SDU_LEN_MAX)]; uint32_t value_mask = 0; uint32_t result; efx_rc_t rc; _NOTE(ARGUNUSED(type)) (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_POLL_BIST; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MCDI_CTL_SDU_LEN_MAX; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) { rc = EMSGSIZE; goto fail2; } if (count > 0) (void) memset(valuesp, '\0', count * sizeof (unsigned long)); result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT); if (result == MC_CMD_POLL_BIST_FAILED && req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MEM_LEN && count > EFX_BIST_MEM_ECC_FATAL) { if (valuesp != NULL) { valuesp[EFX_BIST_MEM_TEST] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_TEST); valuesp[EFX_BIST_MEM_ADDR] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ADDR); valuesp[EFX_BIST_MEM_BUS] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_BUS); valuesp[EFX_BIST_MEM_EXPECT] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_EXPECT); valuesp[EFX_BIST_MEM_ACTUAL] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ACTUAL); valuesp[EFX_BIST_MEM_ECC] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC); valuesp[EFX_BIST_MEM_ECC_PARITY] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_PARITY); valuesp[EFX_BIST_MEM_ECC_FATAL] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_FATAL); } value_mask |= (1 << EFX_BIST_MEM_TEST) | (1 << EFX_BIST_MEM_ADDR) | (1 << EFX_BIST_MEM_BUS) | (1 << EFX_BIST_MEM_EXPECT) | (1 << EFX_BIST_MEM_ACTUAL) | (1 << EFX_BIST_MEM_ECC) | (1 << EFX_BIST_MEM_ECC_PARITY) | (1 << EFX_BIST_MEM_ECC_FATAL); } else if (result == MC_CMD_POLL_BIST_FAILED && encp->enc_phy_type == EFX_PHY_XFI_FARMI && req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN && count > EFX_BIST_FAULT_CODE) { if (valuesp != NULL) valuesp[EFX_BIST_FAULT_CODE] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST); value_mask |= 1 << EFX_BIST_FAULT_CODE; } if (value_maskp != NULL) *value_maskp = value_mask; EFSYS_ASSERT(resultp != NULL); if (result == MC_CMD_POLL_BIST_RUNNING) *resultp = EFX_BIST_RESULT_RUNNING; else if (result == MC_CMD_POLL_BIST_PASSED) 
*resultp = EFX_BIST_RESULT_PASSED; else *resultp = EFX_BIST_RESULT_FAILED; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type) { /* There is no way to stop BIST on EF10. */ _NOTE(ARGUNUSED(enp, type)) } #endif /* EFSYS_OPT_BIST */ #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: projects/clang700-import/sys/dev/sfxge/common/ef10_rx.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/ef10_rx.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/ef10_rx.c (revision 340918) @@ -1,1089 +1,1117 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD static __checkReturn efx_rc_t efx_mcdi_init_rxq( __in efx_nic_t *enp, - __in uint32_t size, + __in uint32_t ndescs, __in uint32_t target_evq, __in uint32_t label, __in uint32_t instance, __in efsys_mem_t *esmp, __in boolean_t disable_scatter, + __in boolean_t want_inner_classes, __in uint32_t ps_bufsize) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_INIT_RXQ_EXT_IN_LEN, MC_CMD_INIT_RXQ_EXT_OUT_LEN)]; - int npages = EFX_RXQ_NBUFS(size); + int npages = EFX_RXQ_NBUFS(ndescs); int i; efx_qword_t *dma_addr; uint64_t addr; efx_rc_t rc; uint32_t dma_mode; boolean_t want_outer_classes; - /* If this changes, then the payload size might need to change. 
*/ - EFSYS_ASSERT3U(MC_CMD_INIT_RXQ_OUT_LEN, ==, 0); - EFSYS_ASSERT3U(size, <=, EFX_RXQ_MAXNDESCS); + EFSYS_ASSERT3U(ndescs, <=, EFX_RXQ_MAXNDESCS); if (ps_bufsize > 0) dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM; else dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET; - if (encp->enc_tunnel_encapsulations_supported != 0) { + if (encp->enc_tunnel_encapsulations_supported != 0 && + !want_inner_classes) { /* * WANT_OUTER_CLASSES can only be specified on hardware which * supports tunnel encapsulation offloads, even though it is * effectively the behaviour the hardware gives. * * Also, on hardware which does support such offloads, older * firmware rejects the flag if the offloads are not supported * by the current firmware variant, which means this may fail if * the capabilities are not updated when the firmware variant * changes. This is not an issue on newer firmware, as it was * changed in bug 69842 (v6.4.2.1007) to permit this flag to be * specified on all firmware variants. */ want_outer_classes = B_TRUE; } else { want_outer_classes = B_FALSE; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_RXQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_RXQ_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_RXQ_EXT_OUT_LEN; - MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, size); + MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance); MCDI_IN_POPULATE_DWORD_9(req, INIT_RXQ_EXT_IN_FLAGS, INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0, INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0, INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0, INIT_RXQ_EXT_IN_CRC_MODE, 0, INIT_RXQ_EXT_IN_FLAG_PREFIX, 1, INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter, INIT_RXQ_EXT_IN_DMA_MODE, dma_mode, INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize, INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_fini_rxq( __in efx_nic_t *enp, __in uint32_t instance) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN, MC_CMD_FINI_RXQ_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FINI_RXQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN; MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: /* * EALREADY is not an error, but indicates that the MC has rebooted and * that the RXQ has already been destroyed. 
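 * This is presumably also why efx_mcdi_execute_quiet() is used above
 * rather than efx_mcdi_execute(): an EALREADY response is expected
 * after an MC reboot and should not be reported as an MCDI failure.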
*/ if (rc != EALREADY) EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_RX_SCALE static __checkReturn efx_rc_t efx_mcdi_rss_context_alloc( __in efx_nic_t *enp, __in efx_rx_scale_context_type_t type, __in uint32_t num_queues, __out uint32_t *rss_contextp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)]; uint32_t rss_context; uint32_t context_type; efx_rc_t rc; if (num_queues > EFX_MAXRSS) { rc = EINVAL; goto fail1; } switch (type) { case EFX_RX_SCALE_EXCLUSIVE: context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE; break; case EFX_RX_SCALE_SHARED: context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED; break; default: rc = EINVAL; goto fail2; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, EVB_PORT_ID_ASSIGNED); MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, context_type); - /* NUM_QUEUES is only used to validate indirection table offsets */ + + /* + * For exclusive contexts, NUM_QUEUES is only used to validate + * indirection table offsets. + * For shared contexts, the provided context will spread traffic over + * NUM_QUEUES many queues. + */ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } if (req.emr_out_length_used < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) { rc = EMSGSIZE; goto fail4; } rss_context = MCDI_OUT_DWORD(req, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); if (rss_context == EF10_RSS_CONTEXT_INVALID) { rc = ENOENT; goto fail5; } *rss_contextp = rss_context; return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE static efx_rc_t efx_mcdi_rss_context_free( __in efx_nic_t *enp, __in uint32_t rss_context) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_FREE_IN_LEN, MC_CMD_RSS_CONTEXT_FREE_OUT_LEN)]; efx_rc_t rc; if (rss_context == EF10_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_FREE_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE static efx_rc_t efx_mcdi_rss_context_set_flags( __in efx_nic_t *enp, __in uint32_t rss_context, __in efx_rx_hash_type_t type) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN, MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)]; efx_rc_t rc; if (rss_context == EF10_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN; MCDI_IN_SET_DWORD(req, 
RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, rss_context); MCDI_IN_POPULATE_DWORD_4(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN, (type & EFX_RX_HASH_IPV4) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN, (type & EFX_RX_HASH_TCPIPV4) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN, (type & EFX_RX_HASH_IPV6) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN, (type & EFX_RX_HASH_TCPIPV6) ? 1 : 0); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE static efx_rc_t efx_mcdi_rss_context_set_key( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) uint8_t *key, __in size_t n) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN, MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN)]; efx_rc_t rc; if (rss_context == EF10_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, rss_context); EFSYS_ASSERT3U(n, ==, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); if (n != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN) { rc = EINVAL; goto fail2; } memcpy(MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY), key, n); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE static efx_rc_t efx_mcdi_rss_context_set_table( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) unsigned int *table, __in size_t n) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN, MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN)]; uint8_t *req_table; int i, rc; if (rss_context == EF10_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN; MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, rss_context); req_table = MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE); for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++) { req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ __checkReturn efx_rc_t ef10_rx_init( __in efx_nic_t *enp) { #if EFSYS_OPT_RX_SCALE if (efx_mcdi_rss_context_alloc(enp, EFX_RX_SCALE_EXCLUSIVE, EFX_MAXRSS, &enp->en_rss_context) == 0) { /* * Allocated an exclusive RSS context, which allows both the * indirection table and key to be modified. */ enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE; enp->en_hash_support = EFX_RX_HASH_AVAILABLE; } else { /* * Failed to allocate an exclusive RSS context. Continue * operation without support for RSS. The pseudo-header in * received packets will not contain a Toeplitz hash value. 
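 * As a minimal, hypothetical sketch of driver-side fallback logic
 * (software_hash() is illustrative and not part of the common code):
 *
 *	if (enp->en_hash_support == EFX_RX_HASH_AVAILABLE)
 *		hash = ef10_rx_prefix_hash(enp, EFX_RX_HASHALG_TOEPLITZ,
 *		    prefix);
 *	else
 *		hash = software_hash(pkt);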
*/ enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE; enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE; } #endif /* EFSYS_OPT_RX_SCALE */ return (0); } #if EFSYS_OPT_RX_SCATTER __checkReturn efx_rc_t ef10_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size) { _NOTE(ARGUNUSED(enp, buf_size)) return (0); } #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t ef10_rx_scale_context_alloc( __in efx_nic_t *enp, __in efx_rx_scale_context_type_t type, __in uint32_t num_queues, __out uint32_t *rss_contextp) { efx_rc_t rc; rc = efx_mcdi_rss_context_alloc(enp, type, num_queues, rss_contextp); if (rc != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t ef10_rx_scale_context_free( __in efx_nic_t *enp, __in uint32_t rss_context) { efx_rc_t rc; rc = efx_mcdi_rss_context_free(enp, rss_context); if (rc != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t ef10_rx_scale_mode_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert) { efx_rc_t rc; EFSYS_ASSERT3U(alg, ==, EFX_RX_HASHALG_TOEPLITZ); EFSYS_ASSERT3U(insert, ==, B_TRUE); if ((alg != EFX_RX_HASHALG_TOEPLITZ) || (insert == B_FALSE)) { rc = EINVAL; goto fail1; } if (rss_context == EFX_RSS_CONTEXT_DEFAULT) { if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) { rc = ENOTSUP; goto fail2; } rss_context = enp->en_rss_context; } if ((rc = efx_mcdi_rss_context_set_flags(enp, rss_context, type)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t ef10_rx_scale_key_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) uint8_t *key, __in size_t n) { efx_rc_t rc; EFX_STATIC_ASSERT(EFX_RSS_KEY_SIZE == MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); if (rss_context == EFX_RSS_CONTEXT_DEFAULT) { if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) { rc = ENOTSUP; goto fail1; } rss_context = enp->en_rss_context; } if ((rc = efx_mcdi_rss_context_set_key(enp, rss_context, key, n)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t ef10_rx_scale_tbl_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) unsigned int *table, __in size_t n) { efx_rc_t rc; if (rss_context == EFX_RSS_CONTEXT_DEFAULT) { if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) { rc = ENOTSUP; goto fail1; } rss_context = enp->en_rss_context; } if ((rc = efx_mcdi_rss_context_set_table(enp, rss_context, table, n)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ /* * EF10 RX pseudo-header * --------------------- * * Receive packets are prefixed by an (optional) 14 byte pseudo-header: * * +00: Toeplitz hash value. * (32bit little-endian) * +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag. * (16bit big-endian) * +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag. * (16bit big-endian) * +08: Packet Length. Zero if the RX datapath was in cut-through mode. 
* (16bit little-endian) * +10: MAC timestamp. Zero if timestamping is not enabled. * (32bit little-endian) * * See "The RX Pseudo-header" in SF-109306-TC. */ __checkReturn efx_rc_t ef10_rx_prefix_pktlen( __in efx_nic_t *enp, __in uint8_t *buffer, __out uint16_t *lengthp) { _NOTE(ARGUNUSED(enp)) /* * The RX pseudo-header contains the packet length, excluding the * pseudo-header. If the hardware receive datapath was operating in * cut-through mode then the length in the RX pseudo-header will be * zero, and the packet length must be obtained from the DMA length * reported in the RX event. */ *lengthp = buffer[8] | (buffer[9] << 8); return (0); } #if EFSYS_OPT_RX_SCALE __checkReturn uint32_t ef10_rx_prefix_hash( __in efx_nic_t *enp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer) { _NOTE(ARGUNUSED(enp)) switch (func) { case EFX_RX_HASHALG_TOEPLITZ: return (buffer[0] | (buffer[1] << 8) | (buffer[2] << 16) | (buffer[3] << 24)); default: EFSYS_ASSERT(0); return (0); } } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_PACKED_STREAM /* * Fake length for RXQ descriptors in packed stream mode * to make hardware happy */ #define EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE 32 #endif - void + void ef10_rx_qpost( - __in efx_rxq_t *erp, - __in_ecount(n) efsys_dma_addr_t *addrp, - __in size_t size, - __in unsigned int n, - __in unsigned int completed, - __in unsigned int added) + __in efx_rxq_t *erp, + __in_ecount(ndescs) efsys_dma_addr_t *addrp, + __in size_t size, + __in unsigned int ndescs, + __in unsigned int completed, + __in unsigned int added) { efx_qword_t qword; unsigned int i; unsigned int offset; unsigned int id; + _NOTE(ARGUNUSED(completed)) + #if EFSYS_OPT_RX_PACKED_STREAM /* * The real size of the buffer does not fit into ESF_DZ_RX_KER_BYTE_CNT * and is equal to 0 after applying the mask. Hardware does not like it.
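 * For example, assuming the byte count field is 14 bits wide, a 1MB
 * packed stream buffer gives 0x100000 & 0x3fff == 0, whereas the fake
 * value of 32 used below survives the mask unchanged.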
*/ if (erp->er_ev_qstate->eers_rx_packed_stream) size = EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE; #endif /* The client driver must not overfill the queue */ - EFSYS_ASSERT3U(added - completed + n, <=, + EFSYS_ASSERT3U(added - completed + ndescs, <=, EFX_RXQ_LIMIT(erp->er_mask + 1)); id = added & (erp->er_mask); - for (i = 0; i < n; i++) { + for (i = 0; i < ndescs; i++) { EFSYS_PROBE4(rx_post, unsigned int, erp->er_index, unsigned int, id, efsys_dma_addr_t, addrp[i], size_t, size); EFX_POPULATE_QWORD_3(qword, ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size), ESF_DZ_RX_KER_BUF_ADDR_DW0, (uint32_t)(addrp[i] & 0xffffffff), ESF_DZ_RX_KER_BUF_ADDR_DW1, (uint32_t)(addrp[i] >> 32)); offset = id * sizeof (efx_qword_t); EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword); id = (id + 1) & (erp->er_mask); } } void ef10_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp) { efx_nic_t *enp = erp->er_enp; unsigned int pushed = *pushedp; uint32_t wptr; efx_dword_t dword; /* Hardware has alignment restriction for WPTR */ wptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN); if (pushed == wptr) return; *pushedp = wptr; /* Push the populated descriptors out */ wptr &= erp->er_mask; EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr); /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1, wptr, pushed & erp->er_mask); EFSYS_PIO_WRITE_BARRIER(); EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, erp->er_index, &dword, B_FALSE); } #if EFSYS_OPT_RX_PACKED_STREAM void ef10_rx_qpush_ps_credits( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_dword_t dword; efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate; uint32_t credits; EFSYS_ASSERT(rxq_state->eers_rx_packed_stream); if (rxq_state->eers_rx_packed_stream_credits == 0) return; /* * It is a bug if we think that FW has utilized more * credits than it is allowed to have (maximum). However, * make sure that we do not credit more than maximum anyway. 
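 * (Credits are accumulated in ef10_rx_qps_packet_info() below: one
 * credit is added each time the consumer's read offset crosses an
 * EFX_RX_PACKED_STREAM_MEM_PER_CREDIT boundary, and the total handed
 * back to firmware here is clamped to
 * EFX_RX_PACKED_STREAM_MAX_CREDITS.)
 */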
credits = MIN(rxq_state->eers_rx_packed_stream_credits, EFX_RX_PACKED_STREAM_MAX_CREDITS); EFX_POPULATE_DWORD_3(dword, ERF_DZ_RX_DESC_MAGIC_DOORBELL, 1, ERF_DZ_RX_DESC_MAGIC_CMD, ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS, ERF_DZ_RX_DESC_MAGIC_DATA, credits); EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, erp->er_index, &dword, B_FALSE); rxq_state->eers_rx_packed_stream_credits = 0; } /* * In accordance with SF-112241-TC the received data has the following layout: * - 8 byte pseudo-header which consists of: * - 4 byte little-endian timestamp * - 2 byte little-endian captured length in bytes * - 2 byte little-endian original packet length in bytes * - captured packet bytes * - optional padding to align to a 64 byte boundary * - 64 bytes of scratch space for the host software */ __checkReturn uint8_t * ef10_rx_qps_packet_info( __in efx_rxq_t *erp, __in uint8_t *buffer, __in uint32_t buffer_length, __in uint32_t current_offset, __out uint16_t *lengthp, __out uint32_t *next_offsetp, __out uint32_t *timestamp) { uint16_t buf_len; uint8_t *pkt_start; efx_qword_t *qwordp; efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate; EFSYS_ASSERT(rxq_state->eers_rx_packed_stream); buffer += current_offset; pkt_start = buffer + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE; qwordp = (efx_qword_t *)buffer; *timestamp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_TSTAMP); *lengthp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_ORIG_LEN); buf_len = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_CAP_LEN); buf_len = P2ROUNDUP(buf_len + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE, EFX_RX_PACKED_STREAM_ALIGNMENT); *next_offsetp = current_offset + buf_len + EFX_RX_PACKED_STREAM_ALIGNMENT; EFSYS_ASSERT3U(*next_offsetp, <=, buffer_length); EFSYS_ASSERT3U(current_offset + *lengthp, <, *next_offsetp); if ((*next_offsetp ^ current_offset) & EFX_RX_PACKED_STREAM_MEM_PER_CREDIT) rxq_state->eers_rx_packed_stream_credits++; return (pkt_start); } #endif __checkReturn efx_rc_t ef10_rx_qflush( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_rc_t rc; if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0) goto fail1; return (0); fail1: /* * EALREADY is not an error, but indicates that the MC has rebooted and * that the RXQ has already been destroyed. Callers need to know that * the RXQ flush has completed to avoid waiting until timeout for a * flush done event that will not be delivered.
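 * A minimal sketch of such a caller (hypothetical driver code; the
 * helpers named here are illustrative):
 *
 *	rc = ef10_rx_qflush(erp);
 *	if (rc == 0)
 *		wait_for_flush_done_event(erp);
 *	else if (rc != EALREADY)
 *		handle_flush_error(rc);
 *	else
 *		mark_queue_flushed(erp);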
*/ if (rc != EALREADY) EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_rx_qenable( __in efx_rxq_t *erp) { /* FIXME */ _NOTE(ARGUNUSED(erp)) /* FIXME */ } __checkReturn efx_rc_t ef10_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, + __in uint32_t type_data, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, + __in unsigned int flags, __in efx_evq_t *eep, __in efx_rxq_t *erp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_rc_t rc; boolean_t disable_scatter; + boolean_t want_inner_classes; unsigned int ps_buf_size; - _NOTE(ARGUNUSED(id, erp)) + _NOTE(ARGUNUSED(id, erp, type_data)) EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH)); EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS); EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit); EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS)); EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS)); - if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) { + if (!ISP2(ndescs) || + (ndescs < EFX_RXQ_MINNDESCS) || (ndescs > EFX_RXQ_MAXNDESCS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_rxq_limit) { rc = EINVAL; goto fail2; } switch (type) { case EFX_RXQ_TYPE_DEFAULT: - case EFX_RXQ_TYPE_SCATTER: ps_buf_size = 0; break; #if EFSYS_OPT_RX_PACKED_STREAM - case EFX_RXQ_TYPE_PACKED_STREAM_1M: - ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M; + case EFX_RXQ_TYPE_PACKED_STREAM: + switch (type_data) { + case EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M: + ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M; + break; + case EFX_RXQ_PACKED_STREAM_BUF_SIZE_512K: + ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K; + break; + case EFX_RXQ_PACKED_STREAM_BUF_SIZE_256K: + ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K; + break; + case EFX_RXQ_PACKED_STREAM_BUF_SIZE_128K: + ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K; + break; + case EFX_RXQ_PACKED_STREAM_BUF_SIZE_64K: + ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K; + break; + default: + rc = ENOTSUP; + goto fail3; + } break; - case EFX_RXQ_TYPE_PACKED_STREAM_512K: - ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K; - break; - case EFX_RXQ_TYPE_PACKED_STREAM_256K: - ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K; - break; - case EFX_RXQ_TYPE_PACKED_STREAM_128K: - ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K; - break; - case EFX_RXQ_TYPE_PACKED_STREAM_64K: - ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K; - break; #endif /* EFSYS_OPT_RX_PACKED_STREAM */ default: rc = ENOTSUP; - goto fail3; + goto fail4; } #if EFSYS_OPT_RX_PACKED_STREAM if (ps_buf_size != 0) { /* Check if datapath firmware supports packed stream mode */ if (encp->enc_rx_packed_stream_supported == B_FALSE) { rc = ENOTSUP; - goto fail4; + goto fail5; } /* Check if packed stream allows configurable buffer sizes */ - if ((type != EFX_RXQ_TYPE_PACKED_STREAM_1M) && + if ((ps_buf_size != MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M) && (encp->enc_rx_var_packed_stream_supported == B_FALSE)) { rc = ENOTSUP; - goto fail5; + goto fail6; } } #else /* EFSYS_OPT_RX_PACKED_STREAM */ EFSYS_ASSERT(ps_buf_size == 0); #endif /* EFSYS_OPT_RX_PACKED_STREAM */ /* Scatter can only be disabled if the firmware supports doing so */ - if (type == EFX_RXQ_TYPE_SCATTER) + if (flags & EFX_RXQ_FLAG_SCATTER) disable_scatter = B_FALSE; else disable_scatter = encp->enc_rx_disable_scatter_supported; - if ((rc = efx_mcdi_init_rxq(enp, n, eep->ee_index, label, index, - esmp, disable_scatter, ps_buf_size)) != 0) - goto fail6; + if (flags & EFX_RXQ_FLAG_INNER_CLASSES) 
+ want_inner_classes = B_TRUE; + else + want_inner_classes = B_FALSE; + if ((rc = efx_mcdi_init_rxq(enp, ndescs, eep->ee_index, label, index, + esmp, disable_scatter, want_inner_classes, + ps_buf_size)) != 0) + goto fail7; + erp->er_eep = eep; erp->er_label = label; ef10_ev_rxlabel_init(eep, erp, label, type); erp->er_ev_qstate = &erp->er_eep->ee_rxq_state[label]; return (0); +fail7: + EFSYS_PROBE(fail7); +#if EFSYS_OPT_RX_PACKED_STREAM fail6: EFSYS_PROBE(fail6); -#if EFSYS_OPT_RX_PACKED_STREAM fail5: EFSYS_PROBE(fail5); +#endif /* EFSYS_OPT_RX_PACKED_STREAM */ fail4: EFSYS_PROBE(fail4); -#endif /* EFSYS_OPT_RX_PACKED_STREAM */ +#if EFSYS_OPT_RX_PACKED_STREAM fail3: EFSYS_PROBE(fail3); +#endif /* EFSYS_OPT_RX_PACKED_STREAM */ fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_rx_qdestroy( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_evq_t *eep = erp->er_eep; unsigned int label = erp->er_label; ef10_ev_rxlabel_fini(eep, label); EFSYS_ASSERT(enp->en_rx_qcount != 0); --enp->en_rx_qcount; EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp); } void ef10_rx_fini( __in efx_nic_t *enp) { #if EFSYS_OPT_RX_SCALE if (enp->en_rss_context_type != EFX_RX_SCALE_UNAVAILABLE) (void) efx_mcdi_rss_context_free(enp, enp->en_rss_context); enp->en_rss_context = 0; enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE; #else _NOTE(ARGUNUSED(enp)) #endif /* EFSYS_OPT_RX_SCALE */ } #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: projects/clang700-import/sys/dev/sfxge/common/ef10_tx.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/ef10_tx.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/ef10_tx.c (revision 340918) @@ -1,774 +1,782 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD #if EFSYS_OPT_QSTATS #define EFX_TX_QSTAT_INCR(_etp, _stat) \ do { \ (_etp)->et_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_TX_QSTAT_INCR(_etp, _stat) #endif static __checkReturn efx_rc_t efx_mcdi_init_txq( __in efx_nic_t *enp, - __in uint32_t size, + __in uint32_t ndescs, __in uint32_t target_evq, __in uint32_t label, __in uint32_t instance, __in uint16_t flags, __in efsys_mem_t *esmp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS), MC_CMD_INIT_TXQ_OUT_LEN)]; efx_qword_t *dma_addr; uint64_t addr; int npages; int i; efx_rc_t rc; EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >= EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs)); - npages = EFX_TXQ_NBUFS(size); + npages = EFX_TXQ_NBUFS(ndescs); if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_TXQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages); req.emr_out_buf = payload; req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN; - MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size); + MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance); MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS, INIT_TXQ_IN_FLAG_BUFF_MODE, 0, INIT_TXQ_IN_FLAG_IP_CSUM_DIS, (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1, INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1, INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN, (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0, INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN, (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0, INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0, INIT_TXQ_IN_CRC_MODE, 0, INIT_TXQ_IN_FLAG_TIMESTAMP, 0); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0); MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); for (i = 0; i < npages; i++) { EFX_POPULATE_QWORD_2(*dma_addr, EFX_DWORD_1, (uint32_t)(addr >> 32), EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); dma_addr++; addr += EFX_BUF_SIZE; } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_fini_txq( __in efx_nic_t *enp, __in uint32_t instance) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN, MC_CMD_FINI_TXQ_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FINI_TXQ; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN; MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: /* * EALREADY is not an error, but indicates that the MC has rebooted and * that the TXQ has already been destroyed. 
*/ if (rc != EALREADY) EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_tx_init( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) return (0); } void ef10_tx_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } __checkReturn efx_rc_t ef10_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __in efx_txq_t *etp, __out unsigned int *addedp) { efx_nic_cfg_t *encp = &enp->en_nic_cfg; uint16_t inner_csum; efx_qword_t desc; efx_rc_t rc; _NOTE(ARGUNUSED(id)) inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP; if (((flags & inner_csum) != 0) && (encp->enc_tunnel_encapsulations_supported == 0)) { rc = EINVAL; goto fail1; } - if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags, - esmp)) != 0) + if ((rc = efx_mcdi_init_txq(enp, ndescs, eep->ee_index, label, index, + flags, esmp)) != 0) goto fail2; /* * A previous user of this TX queue may have written a descriptor to the * TX push collector, but not pushed the doorbell (e.g. after a crash). * The next doorbell write would then push the stale descriptor. * * Ensure the (per network port) TX push collector is cleared by writing * a no-op TX option descriptor. See bug29981 for details. */ *addedp = 1; EFX_POPULATE_QWORD_6(desc, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM, ESF_DZ_TX_OPTION_UDP_TCP_CSUM, (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0, ESF_DZ_TX_OPTION_IP_CSUM, (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0, ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, ESF_DZ_TX_OPTION_INNER_IP_CSUM, (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0); EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc); ef10_tx_qpush(etp, *addedp, 0); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_tx_qdestroy( __in efx_txq_t *etp) { /* FIXME */ _NOTE(ARGUNUSED(etp)) /* FIXME */ } __checkReturn efx_rc_t ef10_tx_qpio_enable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_piobuf_handle_t handle; efx_rc_t rc; if (etp->et_pio_size != 0) { rc = EALREADY; goto fail1; } /* Sub-allocate a PIO block from a piobuf */ if ((rc = ef10_nic_pio_alloc(enp, &etp->et_pio_bufnum, &handle, &etp->et_pio_blknum, &etp->et_pio_offset, &etp->et_pio_size)) != 0) { goto fail2; } EFSYS_ASSERT3U(etp->et_pio_size, !=, 0); /* Link the piobuf to this TXQ */ if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) { goto fail3; } /* * et_pio_offset is the offset of the sub-allocated block within the * hardware PIO buffer. It is used as the buffer address in the PIO * option descriptor. * * et_pio_write_offset is the offset of the sub-allocated block from the * start of the write-combined memory mapping, and is used for writing * data into the PIO buffer. 
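 * As a worked example with hypothetical values, a sub-allocated block
 * in PIO buffer 2 at offset 0x100 would get
 *
 *	et_pio_write_offset = (2 * ER_DZ_TX_PIOBUF_STEP) +
 *	    ER_DZ_TX_PIOBUF_OFST + 0x100,
 *
 * which mirrors the computation below.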
*/ etp->et_pio_write_offset = (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) + ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset; return (0); fail3: EFSYS_PROBE(fail3); ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum); fail2: EFSYS_PROBE(fail2); etp->et_pio_size = 0; fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_tx_qpio_disable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; if (etp->et_pio_size != 0) { /* Unlink the piobuf from this TXQ */ ef10_nic_pio_unlink(enp, etp->et_index); /* Free the sub-allocated PIO block */ ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum); etp->et_pio_size = 0; etp->et_pio_write_offset = 0; } } __checkReturn efx_rc_t ef10_tx_qpio_write( __in efx_txq_t *etp, __in_ecount(length) uint8_t *buffer, __in size_t length, __in size_t offset) { efx_nic_t *enp = etp->et_enp; efsys_bar_t *esbp = enp->en_esbp; uint32_t write_offset; uint32_t write_offset_limit; efx_qword_t *eqp; efx_rc_t rc; EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0); if (etp->et_pio_size == 0) { rc = ENOENT; goto fail1; } if (offset + length > etp->et_pio_size) { rc = ENOSPC; goto fail2; } /* * Writes to PIO buffers must be 64 bit aligned, and multiples of * 64 bits. */ write_offset = etp->et_pio_write_offset + offset; write_offset_limit = write_offset + length; eqp = (efx_qword_t *)buffer; while (write_offset < write_offset_limit) { EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp); eqp++; write_offset += sizeof (efx_qword_t); } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_tx_qpio_post( __in efx_txq_t *etp, __in size_t pkt_length, __in unsigned int completed, __inout unsigned int *addedp) { efx_qword_t pio_desc; unsigned int id; size_t offset; unsigned int added = *addedp; efx_rc_t rc; if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) { rc = ENOSPC; goto fail1; } if (etp->et_pio_size == 0) { rc = ENOENT; goto fail2; } id = added++ & etp->et_mask; offset = id * sizeof (efx_qword_t); EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index, unsigned int, id, uint32_t, etp->et_pio_offset, size_t, pkt_length); EFX_POPULATE_QWORD_5(pio_desc, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, 1, ESF_DZ_TX_PIO_CONT, 0, ESF_DZ_TX_PIO_BYTE_CNT, pkt_length, ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset); EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc); EFX_TX_QSTAT_INCR(etp, TX_POST_PIO); *addedp = added; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn efx_rc_t + __checkReturn efx_rc_t ef10_tx_qpost( - __in efx_txq_t *etp, - __in_ecount(n) efx_buffer_t *eb, - __in unsigned int n, - __in unsigned int completed, - __inout unsigned int *addedp) + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_buffer_t *eb, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) { unsigned int added = *addedp; unsigned int i; efx_rc_t rc; - if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) { + if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) { rc = ENOSPC; goto fail1; } - for (i = 0; i < n; i++) { + for (i = 0; i < ndescs; i++) { efx_buffer_t *ebp = &eb[i]; efsys_dma_addr_t addr = ebp->eb_addr; size_t size = ebp->eb_size; boolean_t eop = ebp->eb_eop; unsigned int id; size_t offset; efx_qword_t qword; /* No limitations on boundary crossing */ EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max); id = added++ & etp->et_mask; offset = id * sizeof (efx_qword_t); 
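		/*
		 * Each efx_buffer_t entry becomes one DMA descriptor on
		 * the ring: ESF_DZ_TX_KER_CONT is set on every fragment
		 * except the last one of a packet (where eop clears it),
		 * and the bus address is split across the
		 * BUF_ADDR_DW0/DW1 fields below.
		 */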
EFSYS_PROBE5(tx_post, unsigned int, etp->et_index, unsigned int, id, efsys_dma_addr_t, addr, size_t, size, boolean_t, eop); EFX_POPULATE_QWORD_5(qword, ESF_DZ_TX_KER_TYPE, 0, ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1, ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size), ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff), ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32)); EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword); } EFX_TX_QSTAT_INCR(etp, TX_POST); *addedp = added; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * This improves performance by, when possible, pushing a TX descriptor at the * same time as the doorbell. The descriptor must be added to the TXQ, so that * it can be used if the hardware decides not to use the pushed descriptor. */ void ef10_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed) { efx_nic_t *enp = etp->et_enp; unsigned int wptr; unsigned int id; size_t offset; efx_qword_t desc; efx_oword_t oword; wptr = added & etp->et_mask; id = pushed & etp->et_mask; offset = id * sizeof (efx_qword_t); EFSYS_MEM_READQ(etp->et_esmp, offset, &desc); /* * SF Bug 65776: TSO option descriptors cannot be pushed if pacer bypass * is enabled on the event queue this transmit queue is attached to. * * To ensure the code is safe, it is easiest to simply test the type of * the descriptor to push, and only push it if it is not a TSO option * descriptor. */ if ((EFX_QWORD_FIELD(desc, ESF_DZ_TX_DESC_IS_OPT) != 1) || (EFX_QWORD_FIELD(desc, ESF_DZ_TX_OPTION_TYPE) != ESE_DZ_TX_OPTION_DESC_TSO)) { /* Push the descriptor and update the wptr. */ EFX_POPULATE_OWORD_3(oword, ERF_DZ_TX_DESC_WPTR, wptr, ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1), ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0)); /* Ensure ordering of memory (descriptors) and PIO (doorbell) */ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id); EFSYS_PIO_WRITE_BARRIER(); EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index, &oword); } else { efx_dword_t dword; /* * Only update the wptr. This is signalled to the hardware by * only writing one DWORD of the doorbell register.
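 * (Presumably the single DWORD written below, oword.eo_dword[2], is
 * the one containing ERF_DZ_TX_DESC_WPTR, so the hardware picks up
 * the new write pointer without treating the write as a pushed
 * descriptor.)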
*/ EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr); dword = oword.eo_dword[2]; /* Ensure ordering of memory (descriptors) and PIO (doorbell) */ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id); EFSYS_PIO_WRITE_BARRIER(); EFX_BAR_TBL_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index, &dword, B_FALSE); } } - __checkReturn efx_rc_t + __checkReturn efx_rc_t ef10_tx_qdesc_post( - __in efx_txq_t *etp, - __in_ecount(n) efx_desc_t *ed, - __in unsigned int n, - __in unsigned int completed, - __inout unsigned int *addedp) + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_desc_t *ed, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) { unsigned int added = *addedp; unsigned int i; efx_rc_t rc; - if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) { + if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) { rc = ENOSPC; goto fail1; } - for (i = 0; i < n; i++) { + for (i = 0; i < ndescs; i++) { efx_desc_t *edp = &ed[i]; unsigned int id; size_t offset; id = added++ & etp->et_mask; offset = id * sizeof (efx_desc_t); EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq); } EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index, - unsigned int, added, unsigned int, n); + unsigned int, added, unsigned int, ndescs); EFX_TX_QSTAT_INCR(etp, TX_POST); *addedp = added; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp) { + _NOTE(ARGUNUSED(etp)) + /* No limitations on boundary crossing */ EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max); EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index, efsys_dma_addr_t, addr, size_t, size, boolean_t, eop); EFX_POPULATE_QWORD_5(edp->ed_eq, ESF_DZ_TX_KER_TYPE, 0, ESF_DZ_TX_KER_CONT, (eop) ? 
0 : 1, ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size), ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff), ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32)); } void ef10_tx_qdesc_tso_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint8_t tcp_flags, __out efx_desc_t *edp) { + _NOTE(ARGUNUSED(etp)) + EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index, uint16_t, ipv4_id, uint32_t, tcp_seq, uint8_t, tcp_flags); EFX_POPULATE_QWORD_5(edp->ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags, ESF_DZ_TX_TSO_IP_ID, ipv4_id, ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq); } void ef10_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint16_t tcp_mss, __out_ecount(count) efx_desc_t *edp, __in int count) { + _NOTE(ARGUNUSED(etp, count)) + EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index, uint16_t, ipv4_id, uint32_t, tcp_seq, uint16_t, tcp_mss); EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS); EFX_POPULATE_QWORD_5(edp[0].ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, ESF_DZ_TX_TSO_OPTION_TYPE, ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, ESF_DZ_TX_TSO_IP_ID, ipv4_id, ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq); EFX_POPULATE_QWORD_4(edp[1].ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, ESF_DZ_TX_TSO_OPTION_TYPE, ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B, ESF_DZ_TX_TSO_TCP_MSS, tcp_mss); } void ef10_tx_qdesc_vlantci_create( __in efx_txq_t *etp, __in uint16_t tci, __out efx_desc_t *edp) { + _NOTE(ARGUNUSED(etp)) + EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index, uint16_t, tci); EFX_POPULATE_QWORD_4(edp->ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_VLAN, ESF_DZ_TX_VLAN_OP, tci ? 1 : 0, ESF_DZ_TX_VLAN_TAG1, tci); } __checkReturn efx_rc_t ef10_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns) { efx_rc_t rc; /* FIXME */ _NOTE(ARGUNUSED(etp, ns)) _NOTE(CONSTANTCONDITION) if (B_FALSE) { rc = ENOTSUP; goto fail1; } /* FIXME */ return (0); fail1: /* * EALREADY is not an error, but indicates that the MC has rebooted and * that the TXQ has already been destroyed. Callers need to know that * the TXQ flush has completed to avoid waiting until timeout for a * flush done event that will not be delivered. */ if (rc != EALREADY) EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t ef10_tx_qflush( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_rc_t rc; if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void ef10_tx_qenable( __in efx_txq_t *etp) { /* FIXME */ _NOTE(ARGUNUSED(etp)) /* FIXME */ } #if EFSYS_OPT_QSTATS void ef10_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat) { unsigned int id; for (id = 0; id < TX_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, etp->et_stat[id]); etp->et_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ Index: projects/clang700-import/sys/dev/sfxge/common/efx.h =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx.h (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx.h (revision 340918) @@ -1,2609 +1,2644 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006-2016 Solarflare Communications Inc. 
* All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. * * $FreeBSD$ */ #ifndef _SYS_EFX_H #define _SYS_EFX_H #include "efsys.h" #include "efx_check.h" #include "efx_phy_ids.h" #ifdef __cplusplus extern "C" { #endif #define EFX_STATIC_ASSERT(_cond) \ - ((void)sizeof(char[(_cond) ? 1 : -1])) + ((void)sizeof (char[(_cond) ? 
1 : -1])) #define EFX_ARRAY_SIZE(_array) \ - (sizeof(_array) / sizeof((_array)[0])) + (sizeof (_array) / sizeof ((_array)[0])) #define EFX_FIELD_OFFSET(_type, _field) \ - ((size_t) &(((_type *)0)->_field)) + ((size_t)&(((_type *)0)->_field)) /* The macro expands divider twice */ #define EFX_DIV_ROUND_UP(_n, _d) (((_n) + (_d) - 1) / (_d)) /* Return codes */ typedef __success(return == 0) int efx_rc_t; /* Chip families */ typedef enum efx_family_e { EFX_FAMILY_INVALID, EFX_FAMILY_FALCON, /* Obsolete and not supported */ EFX_FAMILY_SIENA, EFX_FAMILY_HUNTINGTON, EFX_FAMILY_MEDFORD, EFX_FAMILY_NTYPES } efx_family_t; extern __checkReturn efx_rc_t efx_family( __in uint16_t venid, __in uint16_t devid, __out efx_family_t *efp); #define EFX_PCI_VENID_SFC 0x1924 #define EFX_PCI_DEVID_FALCON 0x0710 /* SFC4000 */ #define EFX_PCI_DEVID_BETHPAGE 0x0803 /* SFC9020 */ #define EFX_PCI_DEVID_SIENA 0x0813 /* SFL9021 */ #define EFX_PCI_DEVID_SIENA_F1_UNINIT 0x0810 #define EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT 0x0901 #define EFX_PCI_DEVID_FARMINGDALE 0x0903 /* SFC9120 PF */ #define EFX_PCI_DEVID_GREENPORT 0x0923 /* SFC9140 PF */ #define EFX_PCI_DEVID_FARMINGDALE_VF 0x1903 /* SFC9120 VF */ #define EFX_PCI_DEVID_GREENPORT_VF 0x1923 /* SFC9140 VF */ #define EFX_PCI_DEVID_MEDFORD_PF_UNINIT 0x0913 #define EFX_PCI_DEVID_MEDFORD 0x0A03 /* SFC9240 PF */ #define EFX_PCI_DEVID_MEDFORD_VF 0x1A03 /* SFC9240 VF */ #define EFX_MEM_BAR 2 /* Error codes */ enum { EFX_ERR_INVALID, EFX_ERR_SRAM_OOB, EFX_ERR_BUFID_DC_OOB, EFX_ERR_MEM_PERR, EFX_ERR_RBUF_OWN, EFX_ERR_TBUF_OWN, EFX_ERR_RDESQ_OWN, EFX_ERR_TDESQ_OWN, EFX_ERR_EVQ_OWN, EFX_ERR_EVFF_OFLO, EFX_ERR_ILL_ADDR, EFX_ERR_SRAM_PERR, EFX_ERR_NCODES }; /* Calculate the IEEE 802.3 CRC32 of a MAC addr */ extern __checkReturn uint32_t efx_crc32_calculate( __in uint32_t crc_init, __in_ecount(length) uint8_t const *input, __in int length); /* Type prototypes */ typedef struct efx_rxq_s efx_rxq_t; /* NIC */ typedef struct efx_nic_s efx_nic_t; extern __checkReturn efx_rc_t efx_nic_create( __in efx_family_t family, __in efsys_identifier_t *esip, __in efsys_bar_t *esbp, __in efsys_lock_t *eslp, __deref_out efx_nic_t **enpp); extern __checkReturn efx_rc_t efx_nic_probe( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_nic_init( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_nic_reset( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG extern __checkReturn efx_rc_t efx_nic_register_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern void efx_nic_fini( __in efx_nic_t *enp); extern void efx_nic_unprobe( __in efx_nic_t *enp); extern void efx_nic_destroy( __in efx_nic_t *enp); #define EFX_PCIE_LINK_SPEED_GEN1 1 #define EFX_PCIE_LINK_SPEED_GEN2 2 #define EFX_PCIE_LINK_SPEED_GEN3 3 typedef enum efx_pcie_link_performance_e { EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH, EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH, EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY, EFX_PCIE_LINK_PERFORMANCE_OPTIMAL } efx_pcie_link_performance_t; extern __checkReturn efx_rc_t efx_nic_calculate_pcie_link_bandwidth( __in uint32_t pcie_link_width, __in uint32_t pcie_link_gen, __out uint32_t *bandwidth_mbpsp); extern __checkReturn efx_rc_t efx_nic_check_pcie_link_speed( __in efx_nic_t *enp, __in uint32_t pcie_link_width, __in uint32_t pcie_link_gen, __out efx_pcie_link_performance_t *resultp); #if EFSYS_OPT_MCDI #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD /* Huntington and Medford require MCDIv2 commands */ #define WITH_MCDI_V2 1 #endif typedef struct efx_mcdi_req_s efx_mcdi_req_t; typedef enum 
efx_mcdi_exception_e { EFX_MCDI_EXCEPTION_MC_REBOOT, EFX_MCDI_EXCEPTION_MC_BADASSERT, } efx_mcdi_exception_t; #if EFSYS_OPT_MCDI_LOGGING typedef enum efx_log_msg_e { EFX_LOG_INVALID, EFX_LOG_MCDI_REQUEST, EFX_LOG_MCDI_RESPONSE, } efx_log_msg_t; #endif /* EFSYS_OPT_MCDI_LOGGING */ typedef struct efx_mcdi_transport_s { void *emt_context; efsys_mem_t *emt_dma_mem; void (*emt_execute)(void *, efx_mcdi_req_t *); void (*emt_ev_cpl)(void *); void (*emt_exception)(void *, efx_mcdi_exception_t); #if EFSYS_OPT_MCDI_LOGGING void (*emt_logger)(void *, efx_log_msg_t, void *, size_t, void *, size_t); #endif /* EFSYS_OPT_MCDI_LOGGING */ #if EFSYS_OPT_MCDI_PROXY_AUTH void (*emt_ev_proxy_response)(void *, uint32_t, efx_rc_t); #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ } efx_mcdi_transport_t; extern __checkReturn efx_rc_t efx_mcdi_init( __in efx_nic_t *enp, __in const efx_mcdi_transport_t *mtp); extern __checkReturn efx_rc_t efx_mcdi_reboot( __in efx_nic_t *enp); void efx_mcdi_new_epoch( __in efx_nic_t *enp); extern void efx_mcdi_get_timeout( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __out uint32_t *usec_timeoutp); extern void efx_mcdi_request_start( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __in boolean_t ev_cpl); extern __checkReturn boolean_t efx_mcdi_request_poll( __in efx_nic_t *enp); extern __checkReturn boolean_t efx_mcdi_request_abort( __in efx_nic_t *enp); extern void efx_mcdi_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_MCDI */ /* INTR */ #define EFX_NINTR_SIENA 1024 typedef enum efx_intr_type_e { EFX_INTR_INVALID = 0, EFX_INTR_LINE, EFX_INTR_MESSAGE, EFX_INTR_NTYPES } efx_intr_type_t; #define EFX_INTR_SIZE (sizeof (efx_oword_t)) extern __checkReturn efx_rc_t efx_intr_init( __in efx_nic_t *enp, __in efx_intr_type_t type, __in efsys_mem_t *esmp); extern void efx_intr_enable( __in efx_nic_t *enp); extern void efx_intr_disable( __in efx_nic_t *enp); extern void efx_intr_disable_unlocked( __in efx_nic_t *enp); #define EFX_INTR_NEVQS 32 extern __checkReturn efx_rc_t efx_intr_trigger( __in efx_nic_t *enp, __in unsigned int level); extern void efx_intr_status_line( __in efx_nic_t *enp, __out boolean_t *fatalp, __out uint32_t *maskp); extern void efx_intr_status_message( __in efx_nic_t *enp, __in unsigned int message, __out boolean_t *fatalp); extern void efx_intr_fatal( __in efx_nic_t *enp); extern void efx_intr_fini( __in efx_nic_t *enp); /* MAC */ #if EFSYS_OPT_MAC_STATS /* START MKCONFIG GENERATED EfxHeaderMacBlock e323546097fd7c65 */ typedef enum efx_mac_stat_e { EFX_MAC_RX_OCTETS, EFX_MAC_RX_PKTS, EFX_MAC_RX_UNICST_PKTS, EFX_MAC_RX_MULTICST_PKTS, EFX_MAC_RX_BRDCST_PKTS, EFX_MAC_RX_PAUSE_PKTS, EFX_MAC_RX_LE_64_PKTS, EFX_MAC_RX_65_TO_127_PKTS, EFX_MAC_RX_128_TO_255_PKTS, EFX_MAC_RX_256_TO_511_PKTS, EFX_MAC_RX_512_TO_1023_PKTS, EFX_MAC_RX_1024_TO_15XX_PKTS, EFX_MAC_RX_GE_15XX_PKTS, EFX_MAC_RX_ERRORS, EFX_MAC_RX_FCS_ERRORS, EFX_MAC_RX_DROP_EVENTS, EFX_MAC_RX_FALSE_CARRIER_ERRORS, EFX_MAC_RX_SYMBOL_ERRORS, EFX_MAC_RX_ALIGN_ERRORS, EFX_MAC_RX_INTERNAL_ERRORS, EFX_MAC_RX_JABBER_PKTS, EFX_MAC_RX_LANE0_CHAR_ERR, EFX_MAC_RX_LANE1_CHAR_ERR, EFX_MAC_RX_LANE2_CHAR_ERR, EFX_MAC_RX_LANE3_CHAR_ERR, EFX_MAC_RX_LANE0_DISP_ERR, EFX_MAC_RX_LANE1_DISP_ERR, EFX_MAC_RX_LANE2_DISP_ERR, EFX_MAC_RX_LANE3_DISP_ERR, EFX_MAC_RX_MATCH_FAULT, EFX_MAC_RX_NODESC_DROP_CNT, EFX_MAC_TX_OCTETS, EFX_MAC_TX_PKTS, EFX_MAC_TX_UNICST_PKTS, EFX_MAC_TX_MULTICST_PKTS, EFX_MAC_TX_BRDCST_PKTS, EFX_MAC_TX_PAUSE_PKTS, EFX_MAC_TX_LE_64_PKTS, EFX_MAC_TX_65_TO_127_PKTS, EFX_MAC_TX_128_TO_255_PKTS, EFX_MAC_TX_256_TO_511_PKTS, 
EFX_MAC_TX_512_TO_1023_PKTS, EFX_MAC_TX_1024_TO_15XX_PKTS, EFX_MAC_TX_GE_15XX_PKTS, EFX_MAC_TX_ERRORS, EFX_MAC_TX_SGL_COL_PKTS, EFX_MAC_TX_MULT_COL_PKTS, EFX_MAC_TX_EX_COL_PKTS, EFX_MAC_TX_LATE_COL_PKTS, EFX_MAC_TX_DEF_PKTS, EFX_MAC_TX_EX_DEF_PKTS, EFX_MAC_PM_TRUNC_BB_OVERFLOW, EFX_MAC_PM_DISCARD_BB_OVERFLOW, EFX_MAC_PM_TRUNC_VFIFO_FULL, EFX_MAC_PM_DISCARD_VFIFO_FULL, EFX_MAC_PM_TRUNC_QBB, EFX_MAC_PM_DISCARD_QBB, EFX_MAC_PM_DISCARD_MAPPING, EFX_MAC_RXDP_Q_DISABLED_PKTS, EFX_MAC_RXDP_DI_DROPPED_PKTS, EFX_MAC_RXDP_STREAMING_PKTS, EFX_MAC_RXDP_HLB_FETCH, EFX_MAC_RXDP_HLB_WAIT, EFX_MAC_VADAPTER_RX_UNICAST_PACKETS, EFX_MAC_VADAPTER_RX_UNICAST_BYTES, EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS, EFX_MAC_VADAPTER_RX_MULTICAST_BYTES, EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS, EFX_MAC_VADAPTER_RX_BROADCAST_BYTES, EFX_MAC_VADAPTER_RX_BAD_PACKETS, EFX_MAC_VADAPTER_RX_BAD_BYTES, EFX_MAC_VADAPTER_RX_OVERFLOW, EFX_MAC_VADAPTER_TX_UNICAST_PACKETS, EFX_MAC_VADAPTER_TX_UNICAST_BYTES, EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS, EFX_MAC_VADAPTER_TX_MULTICAST_BYTES, EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS, EFX_MAC_VADAPTER_TX_BROADCAST_BYTES, EFX_MAC_VADAPTER_TX_BAD_PACKETS, EFX_MAC_VADAPTER_TX_BAD_BYTES, EFX_MAC_VADAPTER_TX_OVERFLOW, EFX_MAC_NSTATS } efx_mac_stat_t; /* END MKCONFIG GENERATED EfxHeaderMacBlock */ #endif /* EFSYS_OPT_MAC_STATS */ typedef enum efx_link_mode_e { EFX_LINK_UNKNOWN = 0, EFX_LINK_DOWN, EFX_LINK_10HDX, EFX_LINK_10FDX, EFX_LINK_100HDX, EFX_LINK_100FDX, EFX_LINK_1000HDX, EFX_LINK_1000FDX, EFX_LINK_10000FDX, EFX_LINK_40000FDX, EFX_LINK_NMODES } efx_link_mode_t; #define EFX_MAC_ADDR_LEN 6 #define EFX_MAC_ADDR_IS_MULTICAST(_address) (((uint8_t *)_address)[0] & 0x01) #define EFX_MAC_MULTICAST_LIST_MAX 256 #define EFX_MAC_SDU_MAX 9202 #define EFX_MAC_PDU_ADJUSTMENT \ (/* EtherII */ 14 \ + /* VLAN */ 4 \ + /* CRC */ 4 \ + /* bug16011 */ 16) \ #define EFX_MAC_PDU(_sdu) \ P2ROUNDUP((_sdu) + EFX_MAC_PDU_ADJUSTMENT, 8) /* * Due to the P2ROUNDUP in EFX_MAC_PDU(), EFX_MAC_SDU_FROM_PDU() may give * the SDU rounded up slightly. 
*/ #define EFX_MAC_SDU_FROM_PDU(_pdu) ((_pdu) - EFX_MAC_PDU_ADJUSTMENT) #define EFX_MAC_PDU_MIN 60 #define EFX_MAC_PDU_MAX EFX_MAC_PDU(EFX_MAC_SDU_MAX) extern __checkReturn efx_rc_t efx_mac_pdu_get( __in efx_nic_t *enp, __out size_t *pdu); extern __checkReturn efx_rc_t efx_mac_pdu_set( __in efx_nic_t *enp, __in size_t pdu); extern __checkReturn efx_rc_t efx_mac_addr_set( __in efx_nic_t *enp, __in uint8_t *addr); extern __checkReturn efx_rc_t efx_mac_filter_set( __in efx_nic_t *enp, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst); extern __checkReturn efx_rc_t efx_mac_multicast_list_set( __in efx_nic_t *enp, __in_ecount(6*count) uint8_t const *addrs, __in int count); extern __checkReturn efx_rc_t efx_mac_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss); extern void efx_mac_filter_default_rxq_clear( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_mac_drain( __in efx_nic_t *enp, __in boolean_t enabled); extern __checkReturn efx_rc_t efx_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp); #define EFX_FCNTL_RESPOND 0x00000001 #define EFX_FCNTL_GENERATE 0x00000002 extern __checkReturn efx_rc_t efx_mac_fcntl_set( __in efx_nic_t *enp, __in unsigned int fcntl, __in boolean_t autoneg); extern void efx_mac_fcntl_get( __in efx_nic_t *enp, __out unsigned int *fcntl_wantedp, __out unsigned int *fcntl_linkp); #if EFSYS_OPT_MAC_STATS #if EFSYS_OPT_NAMES extern __checkReturn const char * efx_mac_stat_name( __in efx_nic_t *enp, __in unsigned int id); #endif /* EFSYS_OPT_NAMES */ #define EFX_MAC_STATS_MASK_BITS_PER_PAGE (8 * sizeof (uint32_t)) #define EFX_MAC_STATS_MASK_NPAGES \ (P2ROUNDUP(EFX_MAC_NSTATS, EFX_MAC_STATS_MASK_BITS_PER_PAGE) / \ EFX_MAC_STATS_MASK_BITS_PER_PAGE) /* * Get mask of MAC statistics supported by the hardware. * * If mask_size is insufficient to return the mask, EINVAL error is * returned. EFX_MAC_STATS_MASK_NPAGES multiplied by size of the page * (which is sizeof (uint32_t)) is sufficient. */ extern __checkReturn efx_rc_t efx_mac_stats_get_mask( __in efx_nic_t *enp, __out_bcount(mask_size) uint32_t *maskp, __in size_t mask_size); #define EFX_MAC_STAT_SUPPORTED(_mask, _stat) \ ((_mask)[(_stat) / EFX_MAC_STATS_MASK_BITS_PER_PAGE] & \ - (1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1)))) + (1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1)))) #define EFX_MAC_STATS_SIZE 0x400 extern __checkReturn efx_rc_t efx_mac_stats_clear( __in efx_nic_t *enp); /* * Upload mac statistics supported by the hardware into the given buffer. * * The reference buffer must be at least %EFX_MAC_STATS_SIZE bytes, * and page aligned. * * The hardware will only DMA statistics that it understands (of course). * Drivers should not make any assumptions about which statistics are * supported, especially when the statistics are generated by firmware. * * Thus, drivers should zero this buffer before use, so that not-understood * statistics read back as zero. 
*/ extern __checkReturn efx_rc_t efx_mac_stats_upload( __in efx_nic_t *enp, __in efsys_mem_t *esmp); extern __checkReturn efx_rc_t efx_mac_stats_periodic( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __in uint16_t period_ms, __in boolean_t events); extern __checkReturn efx_rc_t efx_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp); #endif /* EFSYS_OPT_MAC_STATS */ /* MON */ typedef enum efx_mon_type_e { EFX_MON_INVALID = 0, EFX_MON_SFC90X0, EFX_MON_SFC91X0, EFX_MON_SFC92X0, EFX_MON_NTYPES } efx_mon_type_t; #if EFSYS_OPT_NAMES extern const char * efx_mon_name( __in efx_nic_t *enp); #endif /* EFSYS_OPT_NAMES */ extern __checkReturn efx_rc_t efx_mon_init( __in efx_nic_t *enp); #if EFSYS_OPT_MON_STATS #define EFX_MON_STATS_PAGE_SIZE 0x100 #define EFX_MON_MASK_ELEMENT_SIZE 32 -/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 5d4ee5185e419abe */ +/* START MKCONFIG GENERATED MonitorHeaderStatsBlock aa0233c80156308e */ typedef enum efx_mon_stat_e { EFX_MON_STAT_2_5V, EFX_MON_STAT_VCCP1, EFX_MON_STAT_VCC, EFX_MON_STAT_5V, EFX_MON_STAT_12V, EFX_MON_STAT_VCCP2, EFX_MON_STAT_EXT_TEMP, EFX_MON_STAT_INT_TEMP, EFX_MON_STAT_AIN1, EFX_MON_STAT_AIN2, EFX_MON_STAT_INT_COOLING, EFX_MON_STAT_EXT_COOLING, EFX_MON_STAT_1V, EFX_MON_STAT_1_2V, EFX_MON_STAT_1_8V, EFX_MON_STAT_3_3V, EFX_MON_STAT_1_2VA, EFX_MON_STAT_VREF, EFX_MON_STAT_VAOE, EFX_MON_STAT_AOE_TEMP, EFX_MON_STAT_PSU_AOE_TEMP, EFX_MON_STAT_PSU_TEMP, EFX_MON_STAT_FAN0, EFX_MON_STAT_FAN1, EFX_MON_STAT_FAN2, EFX_MON_STAT_FAN3, EFX_MON_STAT_FAN4, EFX_MON_STAT_VAOE_IN, EFX_MON_STAT_IAOE, EFX_MON_STAT_IAOE_IN, EFX_MON_STAT_NIC_POWER, EFX_MON_STAT_0_9V, EFX_MON_STAT_I0_9V, EFX_MON_STAT_I1_2V, EFX_MON_STAT_0_9V_ADC, EFX_MON_STAT_INT_TEMP2, EFX_MON_STAT_VREG_TEMP, EFX_MON_STAT_VREG_0_9V_TEMP, EFX_MON_STAT_VREG_1_2V_TEMP, EFX_MON_STAT_INT_VPTAT, EFX_MON_STAT_INT_ADC_TEMP, EFX_MON_STAT_EXT_VPTAT, EFX_MON_STAT_EXT_ADC_TEMP, EFX_MON_STAT_AMBIENT_TEMP, EFX_MON_STAT_AIRFLOW, EFX_MON_STAT_VDD08D_VSS08D_CSR, EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC, EFX_MON_STAT_HOTPOINT_TEMP, EFX_MON_STAT_PHY_POWER_SWITCH_PORT0, EFX_MON_STAT_PHY_POWER_SWITCH_PORT1, EFX_MON_STAT_MUM_VCC, EFX_MON_STAT_0V9_A, EFX_MON_STAT_I0V9_A, EFX_MON_STAT_0V9_A_TEMP, EFX_MON_STAT_0V9_B, EFX_MON_STAT_I0V9_B, EFX_MON_STAT_0V9_B_TEMP, EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY, EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXT_ADC, EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY, EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXT_ADC, EFX_MON_STAT_CONTROLLER_MASTER_VPTAT, EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP, EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXT_ADC, EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC, EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT, EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP, EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXT_ADC, EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC, EFX_MON_STAT_SODIMM_VOUT, EFX_MON_STAT_SODIMM_0_TEMP, EFX_MON_STAT_SODIMM_1_TEMP, EFX_MON_STAT_PHY0_VCC, EFX_MON_STAT_PHY1_VCC, EFX_MON_STAT_CONTROLLER_TDIODE_TEMP, EFX_MON_STAT_BOARD_FRONT_TEMP, EFX_MON_STAT_BOARD_BACK_TEMP, + EFX_MON_STAT_I1V8, + EFX_MON_STAT_I2V5, EFX_MON_NSTATS } efx_mon_stat_t; /* END MKCONFIG GENERATED MonitorHeaderStatsBlock */ typedef enum efx_mon_stat_state_e { EFX_MON_STAT_STATE_OK = 0, EFX_MON_STAT_STATE_WARNING = 1, EFX_MON_STAT_STATE_FATAL = 2, EFX_MON_STAT_STATE_BROKEN = 3, EFX_MON_STAT_STATE_NO_READING = 4, } efx_mon_stat_state_t; typedef struct efx_mon_stat_value_s { uint16_t emsv_value; uint16_t emsv_state; } efx_mon_stat_value_t; 
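For reference, the MAC statistics interface above is split into a capability query (efx_mac_stats_get_mask() plus the EFX_MAC_STAT_SUPPORTED() test) and a DMA round trip (efx_mac_stats_upload() followed by efx_mac_stats_update()). Below is a minimal sketch of one plausible consumer, not code from this change: my_mac_stats_snapshot() is a hypothetical helper, 'enp' is assumed to be a probed and initialised NIC, and 'esmp' an EFX_MAC_STATS_SIZE-byte, page-aligned, zeroed DMA buffer; a production driver would also wait for the upload DMA to complete (e.g. via the generation count) before decoding.

	/* Sketch only: assumes "efx.h" and the efsys environment are available. */
	static __checkReturn efx_rc_t
	my_mac_stats_snapshot(
		__in efx_nic_t *enp,
		__in efsys_mem_t *esmp,
		__inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat)
	{
		uint32_t mask[EFX_MAC_STATS_MASK_NPAGES];
		efx_rc_t rc;

		/* Ask firmware which statistics it can supply */
		if ((rc = efx_mac_stats_get_mask(enp, mask, sizeof (mask))) != 0)
			goto fail1;

		/* Example capability test: RX FCS error counting */
		if (!EFX_MAC_STAT_SUPPORTED(mask, EFX_MAC_RX_FCS_ERRORS))
			EFSYS_PROBE(mac_rx_fcs_errors_unsupported);

		/* Start the statistics DMA into the zeroed buffer ... */
		if ((rc = efx_mac_stats_upload(enp, esmp)) != 0)
			goto fail1;

		/* ... and (once the DMA is complete) decode into the array */
		if ((rc = efx_mac_stats_update(enp, esmp, stat, NULL)) != 0)
			goto fail1;

		return (0);

	fail1:
		EFSYS_PROBE1(fail1, efx_rc_t, rc);
		return (rc);
	}

Statistics the firmware does not understand are simply never written, which is why the header insists the buffer be zeroed first: unsupported counters then read back as zero rather than as stale memory.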
#if EFSYS_OPT_NAMES extern const char * efx_mon_stat_name( __in efx_nic_t *enp, __in efx_mon_stat_t id); #endif /* EFSYS_OPT_NAMES */ extern __checkReturn efx_rc_t efx_mon_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values); #endif /* EFSYS_OPT_MON_STATS */ extern void efx_mon_fini( __in efx_nic_t *enp); /* PHY */ extern __checkReturn efx_rc_t efx_phy_verify( __in efx_nic_t *enp); #if EFSYS_OPT_PHY_LED_CONTROL typedef enum efx_phy_led_mode_e { EFX_PHY_LED_DEFAULT = 0, EFX_PHY_LED_OFF, EFX_PHY_LED_ON, EFX_PHY_LED_FLASH, EFX_PHY_LED_NMODES } efx_phy_led_mode_t; extern __checkReturn efx_rc_t efx_phy_led_set( __in efx_nic_t *enp, __in efx_phy_led_mode_t mode); #endif /* EFSYS_OPT_PHY_LED_CONTROL */ extern __checkReturn efx_rc_t efx_port_init( __in efx_nic_t *enp); #if EFSYS_OPT_LOOPBACK typedef enum efx_loopback_type_e { EFX_LOOPBACK_OFF = 0, EFX_LOOPBACK_DATA = 1, EFX_LOOPBACK_GMAC = 2, EFX_LOOPBACK_XGMII = 3, EFX_LOOPBACK_XGXS = 4, EFX_LOOPBACK_XAUI = 5, EFX_LOOPBACK_GMII = 6, EFX_LOOPBACK_SGMII = 7, EFX_LOOPBACK_XGBR = 8, EFX_LOOPBACK_XFI = 9, EFX_LOOPBACK_XAUI_FAR = 10, EFX_LOOPBACK_GMII_FAR = 11, EFX_LOOPBACK_SGMII_FAR = 12, EFX_LOOPBACK_XFI_FAR = 13, EFX_LOOPBACK_GPHY = 14, EFX_LOOPBACK_PHY_XS = 15, EFX_LOOPBACK_PCS = 16, EFX_LOOPBACK_PMA_PMD = 17, EFX_LOOPBACK_XPORT = 18, EFX_LOOPBACK_XGMII_WS = 19, EFX_LOOPBACK_XAUI_WS = 20, EFX_LOOPBACK_XAUI_WS_FAR = 21, EFX_LOOPBACK_XAUI_WS_NEAR = 22, EFX_LOOPBACK_GMII_WS = 23, EFX_LOOPBACK_XFI_WS = 24, EFX_LOOPBACK_XFI_WS_FAR = 25, EFX_LOOPBACK_PHYXS_WS = 26, EFX_LOOPBACK_PMA_INT = 27, EFX_LOOPBACK_SD_NEAR = 28, EFX_LOOPBACK_SD_FAR = 29, EFX_LOOPBACK_PMA_INT_WS = 30, EFX_LOOPBACK_SD_FEP2_WS = 31, EFX_LOOPBACK_SD_FEP1_5_WS = 32, EFX_LOOPBACK_SD_FEP_WS = 33, EFX_LOOPBACK_SD_FES_WS = 34, EFX_LOOPBACK_NTYPES } efx_loopback_type_t; typedef enum efx_loopback_kind_e { EFX_LOOPBACK_KIND_OFF = 0, EFX_LOOPBACK_KIND_ALL, EFX_LOOPBACK_KIND_MAC, EFX_LOOPBACK_KIND_PHY, EFX_LOOPBACK_NKINDS } efx_loopback_kind_t; extern void efx_loopback_mask( __in efx_loopback_kind_t loopback_kind, __out efx_qword_t *maskp); extern __checkReturn efx_rc_t efx_port_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t type); #if EFSYS_OPT_NAMES extern __checkReturn const char * efx_loopback_type_name( __in efx_nic_t *enp, __in efx_loopback_type_t type); #endif /* EFSYS_OPT_NAMES */ #endif /* EFSYS_OPT_LOOPBACK */ extern __checkReturn efx_rc_t efx_port_poll( __in efx_nic_t *enp, __out_opt efx_link_mode_t *link_modep); extern void efx_port_fini( __in efx_nic_t *enp); typedef enum efx_phy_cap_type_e { EFX_PHY_CAP_INVALID = 0, EFX_PHY_CAP_10HDX, EFX_PHY_CAP_10FDX, EFX_PHY_CAP_100HDX, EFX_PHY_CAP_100FDX, EFX_PHY_CAP_1000HDX, EFX_PHY_CAP_1000FDX, EFX_PHY_CAP_10000FDX, EFX_PHY_CAP_PAUSE, EFX_PHY_CAP_ASYM, EFX_PHY_CAP_AN, EFX_PHY_CAP_40000FDX, EFX_PHY_CAP_NTYPES } efx_phy_cap_type_t; #define EFX_PHY_CAP_CURRENT 0x00000000 #define EFX_PHY_CAP_DEFAULT 0x00000001 #define EFX_PHY_CAP_PERM 0x00000002 extern void efx_phy_adv_cap_get( __in efx_nic_t *enp, __in uint32_t flag, __out uint32_t *maskp); extern __checkReturn efx_rc_t efx_phy_adv_cap_set( __in efx_nic_t *enp, __in uint32_t mask); extern void efx_phy_lp_cap_get( __in efx_nic_t *enp, __out uint32_t *maskp); extern __checkReturn efx_rc_t efx_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip); typedef enum efx_phy_media_type_e { EFX_PHY_MEDIA_INVALID = 0, EFX_PHY_MEDIA_XAUI, EFX_PHY_MEDIA_CX4, EFX_PHY_MEDIA_KX4, EFX_PHY_MEDIA_XFP, 
EFX_PHY_MEDIA_SFP_PLUS, EFX_PHY_MEDIA_BASE_T, EFX_PHY_MEDIA_QSFP_PLUS, EFX_PHY_MEDIA_NTYPES } efx_phy_media_type_t; -/* Get the type of medium currently used. If the board has ports for +/* + * Get the type of medium currently used. If the board has ports for * modules, a module is present, and we recognise the media type of * the module, then this will be the media type of the module. * Otherwise it will be the media type of the port. */ extern void efx_phy_media_type_get( __in efx_nic_t *enp, __out efx_phy_media_type_t *typep); extern __checkReturn efx_rc_t efx_phy_module_get_info( __in efx_nic_t *enp, __in uint8_t dev_addr, __in uint8_t offset, __in uint8_t len, __out_bcount(len) uint8_t *data); #if EFSYS_OPT_PHY_STATS /* START MKCONFIG GENERATED PhyHeaderStatsBlock 30ed56ad501f8e36 */ typedef enum efx_phy_stat_e { EFX_PHY_STAT_OUI, EFX_PHY_STAT_PMA_PMD_LINK_UP, EFX_PHY_STAT_PMA_PMD_RX_FAULT, EFX_PHY_STAT_PMA_PMD_TX_FAULT, EFX_PHY_STAT_PMA_PMD_REV_A, EFX_PHY_STAT_PMA_PMD_REV_B, EFX_PHY_STAT_PMA_PMD_REV_C, EFX_PHY_STAT_PMA_PMD_REV_D, EFX_PHY_STAT_PCS_LINK_UP, EFX_PHY_STAT_PCS_RX_FAULT, EFX_PHY_STAT_PCS_TX_FAULT, EFX_PHY_STAT_PCS_BER, EFX_PHY_STAT_PCS_BLOCK_ERRORS, EFX_PHY_STAT_PHY_XS_LINK_UP, EFX_PHY_STAT_PHY_XS_RX_FAULT, EFX_PHY_STAT_PHY_XS_TX_FAULT, EFX_PHY_STAT_PHY_XS_ALIGN, EFX_PHY_STAT_PHY_XS_SYNC_A, EFX_PHY_STAT_PHY_XS_SYNC_B, EFX_PHY_STAT_PHY_XS_SYNC_C, EFX_PHY_STAT_PHY_XS_SYNC_D, EFX_PHY_STAT_AN_LINK_UP, EFX_PHY_STAT_AN_MASTER, EFX_PHY_STAT_AN_LOCAL_RX_OK, EFX_PHY_STAT_AN_REMOTE_RX_OK, EFX_PHY_STAT_CL22EXT_LINK_UP, EFX_PHY_STAT_SNR_A, EFX_PHY_STAT_SNR_B, EFX_PHY_STAT_SNR_C, EFX_PHY_STAT_SNR_D, EFX_PHY_STAT_PMA_PMD_SIGNAL_A, EFX_PHY_STAT_PMA_PMD_SIGNAL_B, EFX_PHY_STAT_PMA_PMD_SIGNAL_C, EFX_PHY_STAT_PMA_PMD_SIGNAL_D, EFX_PHY_STAT_AN_COMPLETE, EFX_PHY_STAT_PMA_PMD_REV_MAJOR, EFX_PHY_STAT_PMA_PMD_REV_MINOR, EFX_PHY_STAT_PMA_PMD_REV_MICRO, EFX_PHY_STAT_PCS_FW_VERSION_0, EFX_PHY_STAT_PCS_FW_VERSION_1, EFX_PHY_STAT_PCS_FW_VERSION_2, EFX_PHY_STAT_PCS_FW_VERSION_3, EFX_PHY_STAT_PCS_FW_BUILD_YY, EFX_PHY_STAT_PCS_FW_BUILD_MM, EFX_PHY_STAT_PCS_FW_BUILD_DD, EFX_PHY_STAT_PCS_OP_MODE, EFX_PHY_NSTATS } efx_phy_stat_t; /* END MKCONFIG GENERATED PhyHeaderStatsBlock */ #if EFSYS_OPT_NAMES extern const char * efx_phy_stat_name( __in efx_nic_t *enp, __in efx_phy_stat_t stat); #endif /* EFSYS_OPT_NAMES */ #define EFX_PHY_STATS_SIZE 0x100 extern __checkReturn efx_rc_t efx_phy_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat); #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_BIST typedef enum efx_bist_type_e { EFX_BIST_TYPE_UNKNOWN, EFX_BIST_TYPE_PHY_NORMAL, EFX_BIST_TYPE_PHY_CABLE_SHORT, EFX_BIST_TYPE_PHY_CABLE_LONG, EFX_BIST_TYPE_MC_MEM, /* Test the MC DMEM and IMEM */ - EFX_BIST_TYPE_SAT_MEM, /* Test the DMEM and IMEM of satellite cpus*/ + EFX_BIST_TYPE_SAT_MEM, /* Test the DMEM and IMEM of satellite cpus */ EFX_BIST_TYPE_REG, /* Test the register memories */ EFX_BIST_TYPE_NTYPES, } efx_bist_type_t; typedef enum efx_bist_result_e { EFX_BIST_RESULT_UNKNOWN, EFX_BIST_RESULT_RUNNING, EFX_BIST_RESULT_PASSED, EFX_BIST_RESULT_FAILED, } efx_bist_result_t; typedef enum efx_phy_cable_status_e { EFX_PHY_CABLE_STATUS_OK, EFX_PHY_CABLE_STATUS_INVALID, EFX_PHY_CABLE_STATUS_OPEN, EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT, EFX_PHY_CABLE_STATUS_INTERPAIRSHORT, EFX_PHY_CABLE_STATUS_BUSY, } efx_phy_cable_status_t; typedef enum efx_bist_value_e { EFX_BIST_PHY_CABLE_LENGTH_A, EFX_BIST_PHY_CABLE_LENGTH_B, EFX_BIST_PHY_CABLE_LENGTH_C, EFX_BIST_PHY_CABLE_LENGTH_D, 
EFX_BIST_PHY_CABLE_STATUS_A, EFX_BIST_PHY_CABLE_STATUS_B, EFX_BIST_PHY_CABLE_STATUS_C, EFX_BIST_PHY_CABLE_STATUS_D, EFX_BIST_FAULT_CODE, - /* Memory BIST specific values. These match to the MC_CMD_BIST_POLL - * response. */ + /* + * Memory BIST specific values. These match to the MC_CMD_BIST_POLL + * response. + */ EFX_BIST_MEM_TEST, EFX_BIST_MEM_ADDR, EFX_BIST_MEM_BUS, EFX_BIST_MEM_EXPECT, EFX_BIST_MEM_ACTUAL, EFX_BIST_MEM_ECC, EFX_BIST_MEM_ECC_PARITY, EFX_BIST_MEM_ECC_FATAL, EFX_BIST_NVALUES, } efx_bist_value_t; extern __checkReturn efx_rc_t efx_bist_enable_offline( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type); extern __checkReturn efx_rc_t efx_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t *resultp, __out_opt uint32_t *value_maskp, __out_ecount_opt(count) unsigned long *valuesp, __in size_t count); extern void efx_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type); #endif /* EFSYS_OPT_BIST */ #define EFX_FEATURE_IPV6 0x00000001 #define EFX_FEATURE_LFSR_HASH_INSERT 0x00000002 #define EFX_FEATURE_LINK_EVENTS 0x00000004 #define EFX_FEATURE_PERIODIC_MAC_STATS 0x00000008 #define EFX_FEATURE_MCDI 0x00000020 #define EFX_FEATURE_LOOKAHEAD_SPLIT 0x00000040 #define EFX_FEATURE_MAC_HEADER_FILTERS 0x00000080 #define EFX_FEATURE_TURBO 0x00000100 #define EFX_FEATURE_MCDI_DMA 0x00000200 #define EFX_FEATURE_TX_SRC_FILTERS 0x00000400 #define EFX_FEATURE_PIO_BUFFERS 0x00000800 #define EFX_FEATURE_FW_ASSISTED_TSO 0x00001000 #define EFX_FEATURE_FW_ASSISTED_TSO_V2 0x00002000 #define EFX_FEATURE_PACKED_STREAM 0x00004000 typedef enum efx_tunnel_protocol_e { EFX_TUNNEL_PROTOCOL_NONE = 0, EFX_TUNNEL_PROTOCOL_VXLAN, EFX_TUNNEL_PROTOCOL_GENEVE, EFX_TUNNEL_PROTOCOL_NVGRE, EFX_TUNNEL_NPROTOS } efx_tunnel_protocol_t; typedef struct efx_nic_cfg_s { uint32_t enc_board_type; uint32_t enc_phy_type; #if EFSYS_OPT_NAMES char enc_phy_name[21]; #endif char enc_phy_revision[21]; efx_mon_type_t enc_mon_type; #if EFSYS_OPT_MON_STATS uint32_t enc_mon_stat_dma_buf_size; uint32_t enc_mon_stat_mask[(EFX_MON_NSTATS + 31) / 32]; #endif unsigned int enc_features; uint8_t enc_mac_addr[6]; uint8_t enc_port; /* PHY port number */ uint32_t enc_intr_vec_base; uint32_t enc_intr_limit; uint32_t enc_evq_limit; uint32_t enc_txq_limit; uint32_t enc_rxq_limit; uint32_t enc_txq_max_ndescs; uint32_t enc_buftbl_limit; uint32_t enc_piobuf_limit; uint32_t enc_piobuf_size; uint32_t enc_piobuf_min_alloc_size; uint32_t enc_evq_timer_quantum_ns; uint32_t enc_evq_timer_max_us; uint32_t enc_clk_mult; uint32_t enc_rx_prefix_size; uint32_t enc_rx_buf_align_start; uint32_t enc_rx_buf_align_end; uint32_t enc_rx_scale_max_exclusive_contexts; #if EFSYS_OPT_LOOPBACK efx_qword_t enc_loopback_types[EFX_LINK_NMODES]; #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_PHY_FLAGS uint32_t enc_phy_flags_mask; #endif /* EFSYS_OPT_PHY_FLAGS */ #if EFSYS_OPT_PHY_LED_CONTROL uint32_t enc_led_mask; #endif /* EFSYS_OPT_PHY_LED_CONTROL */ #if EFSYS_OPT_PHY_STATS uint64_t enc_phy_stat_mask; #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_MCDI uint8_t enc_mcdi_mdio_channel; #if EFSYS_OPT_PHY_STATS uint32_t enc_mcdi_phy_stat_mask; #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_MON_STATS uint32_t *enc_mcdi_sensor_maskp; uint32_t enc_mcdi_sensor_mask_size; #endif /* EFSYS_OPT_MON_STATS */ #endif /* EFSYS_OPT_MCDI */ #if EFSYS_OPT_BIST uint32_t enc_bist_mask; #endif /* EFSYS_OPT_BIST */ #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD uint32_t enc_pf; uint32_t enc_vf; uint32_t 
enc_privilege_mask; #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ boolean_t enc_bug26807_workaround; boolean_t enc_bug35388_workaround; boolean_t enc_bug41750_workaround; boolean_t enc_bug61265_workaround; boolean_t enc_rx_batching_enabled; /* Maximum number of descriptors completed in an rx event. */ uint32_t enc_rx_batch_max; /* Number of rx descriptors the hardware requires for a push. */ uint32_t enc_rx_push_align; /* Maximum amount of data in DMA descriptor */ uint32_t enc_tx_dma_desc_size_max; /* * Boundary which DMA descriptor data must not cross or 0 if no * limitation. */ uint32_t enc_tx_dma_desc_boundary; /* * Maximum number of bytes into the packet the TCP header can start for * the hardware to apply TSO packet edits. */ uint32_t enc_tx_tso_tcp_header_offset_limit; boolean_t enc_fw_assisted_tso_enabled; boolean_t enc_fw_assisted_tso_v2_enabled; /* Number of TSO contexts on the NIC (FATSOv2) */ uint32_t enc_fw_assisted_tso_v2_n_contexts; boolean_t enc_hw_tx_insert_vlan_enabled; /* Number of PFs on the NIC */ uint32_t enc_hw_pf_count; /* Datapath firmware vadapter/vport/vswitch support */ boolean_t enc_datapath_cap_evb; boolean_t enc_rx_disable_scatter_supported; boolean_t enc_allow_set_mac_with_installed_filters; boolean_t enc_enhanced_set_mac_supported; boolean_t enc_init_evq_v2_supported; boolean_t enc_rx_packed_stream_supported; boolean_t enc_rx_var_packed_stream_supported; boolean_t enc_pm_and_rxdp_counters; boolean_t enc_mac_stats_40g_tx_size_bins; uint32_t enc_tunnel_encapsulations_supported; /* External port identifier */ uint8_t enc_external_port; uint32_t enc_mcdi_max_payload_length; /* VPD may be per-PF or global */ boolean_t enc_vpd_is_global; /* Minimum unidirectional bandwidth in Mb/s to max out all ports */ uint32_t enc_required_pcie_bandwidth_mbps; uint32_t enc_max_pcie_link_gen; /* Firmware verifies integrity of NVRAM updates */ uint32_t enc_nvram_update_verify_result_supported; } efx_nic_cfg_t; #define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff) #define EFX_PCI_FUNCTION_IS_VF(_encp) ((_encp)->enc_vf != 0xffff) #define EFX_PCI_FUNCTION(_encp) \ (EFX_PCI_FUNCTION_IS_PF(_encp) ? (_encp)->enc_pf : (_encp)->enc_vf) #define EFX_PCI_VF_PARENT(_encp) ((_encp)->enc_pf) extern const efx_nic_cfg_t * efx_nic_cfg_get( __in efx_nic_t *enp); typedef struct efx_nic_fw_info_s { /* Basic FW version information */ uint16_t enfi_mc_fw_version[4]; /* * If datapath capabilities can be detected, * additional FW information is to be shown */ boolean_t enfi_dpcpu_fw_ids_valid; /* Rx and Tx datapath CPU FW IDs */ uint16_t enfi_rx_dpcpu_fw_id; uint16_t enfi_tx_dpcpu_fw_id; } efx_nic_fw_info_t; extern __checkReturn efx_rc_t efx_nic_get_fw_version( __in efx_nic_t *enp, __out efx_nic_fw_info_t *enfip); /* Driver resource limits (minimum required/maximum usable). 
*/ typedef struct efx_drv_limits_s { uint32_t edl_min_evq_count; uint32_t edl_max_evq_count; uint32_t edl_min_rxq_count; uint32_t edl_max_rxq_count; uint32_t edl_min_txq_count; uint32_t edl_max_txq_count; /* PIO blocks (sub-allocated from piobuf) */ uint32_t edl_min_pio_alloc_size; uint32_t edl_max_pio_alloc_count; } efx_drv_limits_t; extern __checkReturn efx_rc_t efx_nic_set_drv_limits( __inout efx_nic_t *enp, __in efx_drv_limits_t *edlp); typedef enum efx_nic_region_e { EFX_REGION_VI, /* Memory BAR UC mapping */ EFX_REGION_PIO_WRITE_VI, /* Memory BAR WC mapping */ } efx_nic_region_t; extern __checkReturn efx_rc_t efx_nic_get_bar_region( __in efx_nic_t *enp, __in efx_nic_region_t region, __out uint32_t *offsetp, __out size_t *sizep); extern __checkReturn efx_rc_t efx_nic_get_vi_pool( __in efx_nic_t *enp, __out uint32_t *evq_countp, __out uint32_t *rxq_countp, __out uint32_t *txq_countp); #if EFSYS_OPT_VPD typedef enum efx_vpd_tag_e { EFX_VPD_ID = 0x02, EFX_VPD_END = 0x0f, EFX_VPD_RO = 0x10, EFX_VPD_RW = 0x11, } efx_vpd_tag_t; typedef uint16_t efx_vpd_keyword_t; typedef struct efx_vpd_value_s { efx_vpd_tag_t evv_tag; efx_vpd_keyword_t evv_keyword; uint8_t evv_length; uint8_t evv_value[0x100]; } efx_vpd_value_t; #define EFX_VPD_KEYWORD(x, y) ((x) | ((y) << 8)) extern __checkReturn efx_rc_t efx_vpd_init( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_vpd_size( __in efx_nic_t *enp, __out size_t *sizep); extern __checkReturn efx_rc_t efx_vpd_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t efx_vpd_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t efx_vpd_reinit( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t efx_vpd_get( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __inout efx_vpd_value_t *evvp); extern __checkReturn efx_rc_t efx_vpd_set( __in efx_nic_t *enp, __inout_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp); extern __checkReturn efx_rc_t efx_vpd_next( __in efx_nic_t *enp, __inout_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_value_t *evvp, __inout unsigned int *contp); extern __checkReturn efx_rc_t efx_vpd_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern void efx_vpd_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_VPD */ /* NVRAM */ #if EFSYS_OPT_NVRAM typedef enum efx_nvram_type_e { EFX_NVRAM_INVALID = 0, EFX_NVRAM_BOOTROM, EFX_NVRAM_BOOTROM_CFG, EFX_NVRAM_MC_FIRMWARE, EFX_NVRAM_MC_GOLDEN, EFX_NVRAM_PHY, EFX_NVRAM_NULLPHY, EFX_NVRAM_FPGA, EFX_NVRAM_FCFW, EFX_NVRAM_CPLD, EFX_NVRAM_FPGA_BACKUP, EFX_NVRAM_DYNAMIC_CFG, EFX_NVRAM_LICENSE, EFX_NVRAM_UEFIROM, EFX_NVRAM_MUM_FIRMWARE, EFX_NVRAM_NTYPES, } efx_nvram_type_t; extern __checkReturn efx_rc_t efx_nvram_init( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG extern __checkReturn efx_rc_t efx_nvram_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern __checkReturn efx_rc_t efx_nvram_size( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out size_t *sizep); extern __checkReturn efx_rc_t efx_nvram_rw_start( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out_opt size_t *pref_chunkp); extern __checkReturn efx_rc_t efx_nvram_rw_finish( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out_opt uint32_t *verify_resultp); extern __checkReturn efx_rc_t efx_nvram_get_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *subtypep, 
__out_ecount(4) uint16_t version[4]); extern __checkReturn efx_rc_t efx_nvram_read_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t efx_nvram_read_backup( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t efx_nvram_set_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in_ecount(4) uint16_t version[4]); extern __checkReturn efx_rc_t efx_nvram_validate( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in_bcount(partn_size) caddr_t partn_data, __in size_t partn_size); extern __checkReturn efx_rc_t efx_nvram_erase( __in efx_nic_t *enp, __in efx_nvram_type_t type); extern __checkReturn efx_rc_t efx_nvram_write_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __in_bcount(size) caddr_t data, __in size_t size); extern void efx_nvram_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_NVRAM */ #if EFSYS_OPT_BOOTCFG /* Report size and offset of bootcfg sector in NVRAM partition. */ extern __checkReturn efx_rc_t efx_bootcfg_sector_info( __in efx_nic_t *enp, __in uint32_t pf, __out_opt uint32_t *sector_countp, __out size_t *offsetp, __out size_t *max_sizep); /* * Copy bootcfg sector data to a target buffer which may differ in size. * Optionally corrects format errors in source buffer. */ extern efx_rc_t efx_bootcfg_copy_sector( __in efx_nic_t *enp, __inout_bcount(sector_length) uint8_t *sector, __in size_t sector_length, __out_bcount(data_size) uint8_t *data, __in size_t data_size, __in boolean_t handle_format_errors); extern efx_rc_t efx_bootcfg_read( __in efx_nic_t *enp, - __out_bcount(size) caddr_t data, + __out_bcount(size) uint8_t *data, __in size_t size); extern efx_rc_t efx_bootcfg_write( __in efx_nic_t *enp, - __in_bcount(size) caddr_t data, + __in_bcount(size) uint8_t *data, __in size_t size); #endif /* EFSYS_OPT_BOOTCFG */ #if EFSYS_OPT_DIAG typedef enum efx_pattern_type_t { EFX_PATTERN_BYTE_INCREMENT = 0, EFX_PATTERN_ALL_THE_SAME, EFX_PATTERN_BIT_ALTERNATE, EFX_PATTERN_BYTE_ALTERNATE, EFX_PATTERN_BYTE_CHANGING, EFX_PATTERN_BIT_SWEEP, EFX_PATTERN_NTYPES } efx_pattern_type_t; typedef void (*efx_sram_pattern_fn_t)( __in size_t row, __in boolean_t negate, __out efx_qword_t *eqp); extern __checkReturn efx_rc_t efx_sram_test( __in efx_nic_t *enp, __in efx_pattern_type_t type); #endif /* EFSYS_OPT_DIAG */ extern __checkReturn efx_rc_t efx_sram_buf_tbl_set( __in efx_nic_t *enp, __in uint32_t id, __in efsys_mem_t *esmp, __in size_t n); extern void efx_sram_buf_tbl_clear( __in efx_nic_t *enp, __in uint32_t id, __in size_t n); #define EFX_BUF_TBL_SIZE 0x20000 #define EFX_BUF_SIZE 4096 /* EV */ typedef struct efx_evq_s efx_evq_t; #if EFSYS_OPT_QSTATS /* START MKCONFIG GENERATED EfxHeaderEventQueueBlock 6f3843f5fe7cc843 */ typedef enum efx_ev_qstat_e { EV_ALL, EV_RX, EV_RX_OK, EV_RX_FRM_TRUNC, EV_RX_TOBE_DISC, EV_RX_PAUSE_FRM_ERR, EV_RX_BUF_OWNER_ID_ERR, EV_RX_IPV4_HDR_CHKSUM_ERR, EV_RX_TCP_UDP_CHKSUM_ERR, EV_RX_ETH_CRC_ERR, EV_RX_IP_FRAG_ERR, EV_RX_MCAST_PKT, EV_RX_MCAST_HASH_MATCH, EV_RX_TCP_IPV4, EV_RX_TCP_IPV6, EV_RX_UDP_IPV4, EV_RX_UDP_IPV6, EV_RX_OTHER_IPV4, EV_RX_OTHER_IPV6, EV_RX_NON_IP, EV_RX_BATCH, EV_TX, EV_TX_WQ_FF_FULL, EV_TX_PKT_ERR, EV_TX_PKT_TOO_BIG, EV_TX_UNEXPECTED, EV_GLOBAL, EV_GLOBAL_MNT, EV_DRIVER, EV_DRIVER_SRM_UPD_DONE, EV_DRIVER_TX_DESCQ_FLS_DONE, EV_DRIVER_RX_DESCQ_FLS_DONE, EV_DRIVER_RX_DESCQ_FLS_FAILED, 
EV_DRIVER_RX_DSC_ERROR, EV_DRIVER_TX_DSC_ERROR, EV_DRV_GEN, EV_MCDI_RESPONSE, EV_NQSTATS } efx_ev_qstat_t; /* END MKCONFIG GENERATED EfxHeaderEventQueueBlock */ #endif /* EFSYS_OPT_QSTATS */ extern __checkReturn efx_rc_t efx_ev_init( __in efx_nic_t *enp); extern void efx_ev_fini( __in efx_nic_t *enp); #define EFX_EVQ_MAXNEVS 32768 #define EFX_EVQ_MINNEVS 512 #define EFX_EVQ_SIZE(_nevs) ((_nevs) * sizeof (efx_qword_t)) #define EFX_EVQ_NBUFS(_nevs) (EFX_EVQ_SIZE(_nevs) / EFX_BUF_SIZE) #define EFX_EVQ_FLAGS_TYPE_MASK (0x3) #define EFX_EVQ_FLAGS_TYPE_AUTO (0x0) #define EFX_EVQ_FLAGS_TYPE_THROUGHPUT (0x1) #define EFX_EVQ_FLAGS_TYPE_LOW_LATENCY (0x2) #define EFX_EVQ_FLAGS_NOTIFY_MASK (0xC) #define EFX_EVQ_FLAGS_NOTIFY_INTERRUPT (0x0) /* Interrupting (default) */ #define EFX_EVQ_FLAGS_NOTIFY_DISABLED (0x4) /* Non-interrupting */ extern __checkReturn efx_rc_t efx_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, __in uint32_t us, __in uint32_t flags, __deref_out efx_evq_t **eepp); extern void efx_ev_qpost( __in efx_evq_t *eep, __in uint16_t data); typedef __checkReturn boolean_t (*efx_initialized_ev_t)( __in_opt void *arg); #define EFX_PKT_UNICAST 0x0004 #define EFX_PKT_START 0x0008 #define EFX_PKT_VLAN_TAGGED 0x0010 #define EFX_CKSUM_TCPUDP 0x0020 #define EFX_CKSUM_IPV4 0x0040 #define EFX_PKT_CONT 0x0080 #define EFX_CHECK_VLAN 0x0100 #define EFX_PKT_TCP 0x0200 #define EFX_PKT_UDP 0x0400 #define EFX_PKT_IPV4 0x0800 #define EFX_PKT_IPV6 0x1000 #define EFX_PKT_PREFIX_LEN 0x2000 #define EFX_ADDR_MISMATCH 0x4000 #define EFX_DISCARD 0x8000 /* * The following flags are used only for packed stream * mode. The values for the flags are reused to fit into 16 bit, * since EFX_PKT_START and EFX_PKT_CONT are never used in * packed stream mode */ #define EFX_PKT_PACKED_STREAM_NEW_BUFFER EFX_PKT_START #define EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE EFX_PKT_CONT #define EFX_EV_RX_NLABELS 32 #define EFX_EV_TX_NLABELS 32 typedef __checkReturn boolean_t (*efx_rx_ev_t)( __in_opt void *arg, __in uint32_t label, __in uint32_t id, __in uint32_t size, __in uint16_t flags); #if EFSYS_OPT_RX_PACKED_STREAM /* * Packed stream mode is documented in SF-112241-TC. * The general idea is that, instead of putting each incoming * packet into a separate buffer which is specified in a RX * descriptor, a large buffer is provided to the hardware and * packets are put there in a continuous stream. * The main advantage of such an approach is that RX queue refilling * happens much less frequently. 
*/ typedef __checkReturn boolean_t (*efx_rx_ps_ev_t)( __in_opt void *arg, __in uint32_t label, __in uint32_t id, __in uint32_t pkt_count, __in uint16_t flags); #endif typedef __checkReturn boolean_t (*efx_tx_ev_t)( __in_opt void *arg, __in uint32_t label, __in uint32_t id); #define EFX_EXCEPTION_RX_RECOVERY 0x00000001 #define EFX_EXCEPTION_RX_DSC_ERROR 0x00000002 #define EFX_EXCEPTION_TX_DSC_ERROR 0x00000003 #define EFX_EXCEPTION_UNKNOWN_SENSOREVT 0x00000004 #define EFX_EXCEPTION_FWALERT_SRAM 0x00000005 #define EFX_EXCEPTION_UNKNOWN_FWALERT 0x00000006 #define EFX_EXCEPTION_RX_ERROR 0x00000007 #define EFX_EXCEPTION_TX_ERROR 0x00000008 #define EFX_EXCEPTION_EV_ERROR 0x00000009 typedef __checkReturn boolean_t (*efx_exception_ev_t)( __in_opt void *arg, __in uint32_t label, __in uint32_t data); typedef __checkReturn boolean_t (*efx_rxq_flush_done_ev_t)( __in_opt void *arg, __in uint32_t rxq_index); typedef __checkReturn boolean_t (*efx_rxq_flush_failed_ev_t)( __in_opt void *arg, __in uint32_t rxq_index); typedef __checkReturn boolean_t (*efx_txq_flush_done_ev_t)( __in_opt void *arg, __in uint32_t txq_index); typedef __checkReturn boolean_t (*efx_software_ev_t)( __in_opt void *arg, __in uint16_t magic); typedef __checkReturn boolean_t (*efx_sram_ev_t)( __in_opt void *arg, __in uint32_t code); #define EFX_SRAM_CLEAR 0 #define EFX_SRAM_UPDATE 1 #define EFX_SRAM_ILLEGAL_CLEAR 2 typedef __checkReturn boolean_t (*efx_wake_up_ev_t)( __in_opt void *arg, __in uint32_t label); typedef __checkReturn boolean_t (*efx_timer_ev_t)( __in_opt void *arg, __in uint32_t label); typedef __checkReturn boolean_t (*efx_link_change_ev_t)( __in_opt void *arg, __in efx_link_mode_t link_mode); #if EFSYS_OPT_MON_STATS typedef __checkReturn boolean_t (*efx_monitor_ev_t)( __in_opt void *arg, __in efx_mon_stat_t id, __in efx_mon_stat_value_t value); #endif /* EFSYS_OPT_MON_STATS */ #if EFSYS_OPT_MAC_STATS typedef __checkReturn boolean_t (*efx_mac_stats_ev_t)( __in_opt void *arg, - __in uint32_t generation - ); + __in uint32_t generation); #endif /* EFSYS_OPT_MAC_STATS */ typedef struct efx_ev_callbacks_s { efx_initialized_ev_t eec_initialized; efx_rx_ev_t eec_rx; #if EFSYS_OPT_RX_PACKED_STREAM efx_rx_ps_ev_t eec_rx_ps; #endif efx_tx_ev_t eec_tx; efx_exception_ev_t eec_exception; efx_rxq_flush_done_ev_t eec_rxq_flush_done; efx_rxq_flush_failed_ev_t eec_rxq_flush_failed; efx_txq_flush_done_ev_t eec_txq_flush_done; efx_software_ev_t eec_software; efx_sram_ev_t eec_sram; efx_wake_up_ev_t eec_wake_up; efx_timer_ev_t eec_timer; efx_link_change_ev_t eec_link_change; #if EFSYS_OPT_MON_STATS efx_monitor_ev_t eec_monitor; #endif /* EFSYS_OPT_MON_STATS */ #if EFSYS_OPT_MAC_STATS efx_mac_stats_ev_t eec_mac_stats; #endif /* EFSYS_OPT_MAC_STATS */ } efx_ev_callbacks_t; extern __checkReturn boolean_t efx_ev_qpending( __in efx_evq_t *eep, __in unsigned int count); #if EFSYS_OPT_EV_PREFETCH extern void efx_ev_qprefetch( __in efx_evq_t *eep, __in unsigned int count); #endif /* EFSYS_OPT_EV_PREFETCH */ extern void efx_ev_qpoll( __in efx_evq_t *eep, __inout unsigned int *countp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); extern __checkReturn efx_rc_t efx_ev_usecs_to_ticks( __in efx_nic_t *enp, __in unsigned int usecs, __out unsigned int *ticksp); extern __checkReturn efx_rc_t efx_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us); extern __checkReturn efx_rc_t efx_ev_qprime( __in efx_evq_t *eep, __in unsigned int count); #if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES extern const char * efx_ev_qstat_name( __in efx_nic_t *enp, __in 
unsigned int id); #endif /* EFSYS_OPT_NAMES */ extern void efx_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); #endif /* EFSYS_OPT_QSTATS */ extern void efx_ev_qdestroy( __in efx_evq_t *eep); /* RX */ extern __checkReturn efx_rc_t efx_rx_init( __inout efx_nic_t *enp); extern void efx_rx_fini( __in efx_nic_t *enp); #if EFSYS_OPT_RX_SCATTER __checkReturn efx_rc_t efx_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size); #endif /* EFSYS_OPT_RX_SCATTER */ /* Handle to represent use of the default RSS context. */ #define EFX_RSS_CONTEXT_DEFAULT 0xffffffff #if EFSYS_OPT_RX_SCALE typedef enum efx_rx_hash_alg_e { EFX_RX_HASHALG_LFSR = 0, EFX_RX_HASHALG_TOEPLITZ } efx_rx_hash_alg_t; #define EFX_RX_HASH_IPV4 (1U << 0) #define EFX_RX_HASH_TCPIPV4 (1U << 1) #define EFX_RX_HASH_IPV6 (1U << 2) #define EFX_RX_HASH_TCPIPV6 (1U << 3) typedef unsigned int efx_rx_hash_type_t; typedef enum efx_rx_hash_support_e { EFX_RX_HASH_UNAVAILABLE = 0, /* Hardware hash not inserted */ EFX_RX_HASH_AVAILABLE /* Insert hash with/without RSS */ } efx_rx_hash_support_t; #define EFX_RSS_KEY_SIZE 40 /* RSS key size (bytes) */ #define EFX_RSS_TBL_SIZE 128 /* Rows in RX indirection table */ #define EFX_MAXRSS 64 /* RX indirection entry range */ #define EFX_MAXRSS_LEGACY 16 /* See bug16611 and bug17213 */ typedef enum efx_rx_scale_context_type_e { EFX_RX_SCALE_UNAVAILABLE = 0, /* No RX scale context */ EFX_RX_SCALE_EXCLUSIVE, /* Writable key/indirection table */ EFX_RX_SCALE_SHARED /* Read-only key/indirection table */ } efx_rx_scale_context_type_t; extern __checkReturn efx_rc_t efx_rx_hash_default_support_get( __in efx_nic_t *enp, __out efx_rx_hash_support_t *supportp); extern __checkReturn efx_rc_t efx_rx_scale_default_support_get( __in efx_nic_t *enp, __out efx_rx_scale_context_type_t *typep); extern __checkReturn efx_rc_t efx_rx_scale_context_alloc( __in efx_nic_t *enp, __in efx_rx_scale_context_type_t type, __in uint32_t num_queues, __out uint32_t *rss_contextp); extern __checkReturn efx_rc_t efx_rx_scale_context_free( __in efx_nic_t *enp, __in uint32_t rss_context); extern __checkReturn efx_rc_t efx_rx_scale_mode_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert); extern __checkReturn efx_rc_t efx_rx_scale_tbl_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) unsigned int *table, __in size_t n); extern __checkReturn efx_rc_t efx_rx_scale_key_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) uint8_t *key, __in size_t n); extern __checkReturn uint32_t efx_pseudo_hdr_hash_get( __in efx_rxq_t *erp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer); #endif /* EFSYS_OPT_RX_SCALE */ extern __checkReturn efx_rc_t efx_pseudo_hdr_pkt_length_get( __in efx_rxq_t *erp, __in uint8_t *buffer, __out uint16_t *pkt_lengthp); #define EFX_RXQ_MAXNDESCS 4096 #define EFX_RXQ_MINNDESCS 512 #define EFX_RXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t)) #define EFX_RXQ_NBUFS(_ndescs) (EFX_RXQ_SIZE(_ndescs) / EFX_BUF_SIZE) #define EFX_RXQ_LIMIT(_ndescs) ((_ndescs) - 16) #define EFX_RXQ_DC_NDESCS(_dcsize) (8 << _dcsize) typedef enum efx_rxq_type_e { EFX_RXQ_TYPE_DEFAULT, - EFX_RXQ_TYPE_SCATTER, - EFX_RXQ_TYPE_PACKED_STREAM_1M, - EFX_RXQ_TYPE_PACKED_STREAM_512K, - EFX_RXQ_TYPE_PACKED_STREAM_256K, - EFX_RXQ_TYPE_PACKED_STREAM_128K, - EFX_RXQ_TYPE_PACKED_STREAM_64K, + EFX_RXQ_TYPE_PACKED_STREAM, EFX_RXQ_NTYPES } efx_rxq_type_t; +/* + * Dummy flag to be used instead of 0 to make it 
clear that the argument + * is receive queue flags. + */ +#define EFX_RXQ_FLAG_NONE 0x0 +#define EFX_RXQ_FLAG_SCATTER 0x1 +/* + * If tunnels are supported and the Rx event can provide information about + * either outer or inner packet classes (e.g. SFN8xxx adapters running the + * full-feature firmware variant), outer classes are requested by default. + * However, if the driver supports tunnels, this flag requests inner + * classes, which are required to interpret inner Rx checksum offload + * results. + */ +#define EFX_RXQ_FLAG_INNER_CLASSES 0x2 + extern __checkReturn efx_rc_t efx_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, + __in unsigned int flags, __in efx_evq_t *eep, __deref_out efx_rxq_t **erpp); +#if EFSYS_OPT_RX_PACKED_STREAM + +#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M (1U * 1024 * 1024) +#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_512K (512U * 1024) +#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_256K (256U * 1024) +#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_128K (128U * 1024) +#define EFX_RXQ_PACKED_STREAM_BUF_SIZE_64K (64U * 1024) + +extern __checkReturn efx_rc_t +efx_rx_qcreate_packed_stream( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in uint32_t ps_buf_size, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in efx_evq_t *eep, + __deref_out efx_rxq_t **erpp); + +#endif + typedef struct efx_buffer_s { efsys_dma_addr_t eb_addr; size_t eb_size; boolean_t eb_eop; } efx_buffer_t; typedef struct efx_desc_s { efx_qword_t ed_eq; } efx_desc_t; -extern void +extern void efx_rx_qpost( - __in efx_rxq_t *erp, - __in_ecount(n) efsys_dma_addr_t *addrp, - __in size_t size, - __in unsigned int n, - __in unsigned int completed, - __in unsigned int added); + __in efx_rxq_t *erp, + __in_ecount(ndescs) efsys_dma_addr_t *addrp, + __in size_t size, + __in unsigned int ndescs, + __in unsigned int completed, + __in unsigned int added); extern void efx_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp); #if EFSYS_OPT_RX_PACKED_STREAM extern void efx_rx_qpush_ps_credits( __in efx_rxq_t *erp); extern __checkReturn uint8_t * efx_rx_qps_packet_info( __in efx_rxq_t *erp, __in uint8_t *buffer, __in uint32_t buffer_length, __in uint32_t current_offset, __out uint16_t *lengthp, __out uint32_t *next_offsetp, __out uint32_t *timestamp); #endif extern __checkReturn efx_rc_t efx_rx_qflush( __in efx_rxq_t *erp); extern void efx_rx_qenable( __in efx_rxq_t *erp); extern void efx_rx_qdestroy( __in efx_rxq_t *erp); /* TX */ typedef struct efx_txq_s efx_txq_t; #if EFSYS_OPT_QSTATS /* START MKCONFIG GENERATED EfxHeaderTransmitQueueBlock 12dff8778598b2db */ typedef enum efx_tx_qstat_e { TX_POST, TX_POST_PIO, TX_NQSTATS } efx_tx_qstat_t; /* END MKCONFIG GENERATED EfxHeaderTransmitQueueBlock */ #endif /* EFSYS_OPT_QSTATS */ extern __checkReturn efx_rc_t efx_tx_init( __in efx_nic_t *enp); extern void efx_tx_fini( __in efx_nic_t *enp); #define EFX_TXQ_MINNDESCS 512 #define EFX_TXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t)) #define EFX_TXQ_NBUFS(_ndescs) (EFX_TXQ_SIZE(_ndescs) / EFX_BUF_SIZE) #define EFX_TXQ_LIMIT(_ndescs) ((_ndescs) - 16) -#define EFX_TXQ_DC_NDESCS(_dcsize) (8 << _dcsize) #define EFX_TXQ_MAX_BUFS 8 /* Maximum independent of EFX_BUG35388_WORKAROUND.
*/ #define EFX_TXQ_CKSUM_IPV4 0x0001 #define EFX_TXQ_CKSUM_TCPUDP 0x0002 #define EFX_TXQ_FATSOV2 0x0004 #define EFX_TXQ_CKSUM_INNER_IPV4 0x0008 #define EFX_TXQ_CKSUM_INNER_TCPUDP 0x0010 extern __checkReturn efx_rc_t efx_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __deref_out efx_txq_t **etpp, __out unsigned int *addedp); -extern __checkReturn efx_rc_t +extern __checkReturn efx_rc_t efx_tx_qpost( - __in efx_txq_t *etp, - __in_ecount(n) efx_buffer_t *eb, - __in unsigned int n, - __in unsigned int completed, - __inout unsigned int *addedp); + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_buffer_t *eb, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp); extern __checkReturn efx_rc_t efx_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns); extern void efx_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed); extern __checkReturn efx_rc_t efx_tx_qflush( __in efx_txq_t *etp); extern void efx_tx_qenable( __in efx_txq_t *etp); extern __checkReturn efx_rc_t efx_tx_qpio_enable( __in efx_txq_t *etp); extern void efx_tx_qpio_disable( __in efx_txq_t *etp); extern __checkReturn efx_rc_t efx_tx_qpio_write( __in efx_txq_t *etp, __in_ecount(buf_length) uint8_t *buffer, __in size_t buf_length, __in size_t pio_buf_offset); extern __checkReturn efx_rc_t efx_tx_qpio_post( __in efx_txq_t *etp, __in size_t pkt_length, __in unsigned int completed, __inout unsigned int *addedp); extern __checkReturn efx_rc_t efx_tx_qdesc_post( __in efx_txq_t *etp, __in_ecount(n) efx_desc_t *ed, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); extern void efx_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp); extern void efx_tx_qdesc_tso_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint8_t tcp_flags, __out efx_desc_t *edp); /* Number of FATSOv2 option descriptors */ #define EFX_TX_FATSOV2_OPT_NDESCS 2 /* Maximum number of DMA segments per TSO packet (not superframe) */ #define EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX 24 extern void efx_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint16_t tcp_mss, __out_ecount(count) efx_desc_t *edp, __in int count); extern void efx_tx_qdesc_vlantci_create( __in efx_txq_t *etp, __in uint16_t tci, __out efx_desc_t *edp); #if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES extern const char * efx_tx_qstat_name( __in efx_nic_t *etp, __in unsigned int id); #endif /* EFSYS_OPT_NAMES */ extern void efx_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat); #endif /* EFSYS_OPT_QSTATS */ extern void efx_tx_qdestroy( __in efx_txq_t *etp); /* FILTER */ #if EFSYS_OPT_FILTER #define EFX_ETHER_TYPE_IPV4 0x0800 #define EFX_ETHER_TYPE_IPV6 0x86DD #define EFX_IPPROTO_TCP 6 #define EFX_IPPROTO_UDP 17 #define EFX_IPPROTO_GRE 47 /* Use RSS to spread across multiple queues */ #define EFX_FILTER_FLAG_RX_RSS 0x01 /* Enable RX scatter */ #define EFX_FILTER_FLAG_RX_SCATTER 0x02 /* * Override an automatic filter (priority EFX_FILTER_PRI_AUTO). * May only be set by the filter implementation for each type. * A removal request will restore the automatic filter in its place. 
*/ #define EFX_FILTER_FLAG_RX_OVER_AUTO 0x04 /* Filter is for RX */ #define EFX_FILTER_FLAG_RX 0x08 /* Filter is for TX */ #define EFX_FILTER_FLAG_TX 0x10 -typedef unsigned int efx_filter_flags_t; +typedef uint8_t efx_filter_flags_t; /* * Flags which specify the fields to match on. The values are the same as in the * MC_CMD_FILTER_OP/MC_CMD_FILTER_OP_EXT commands. */ -typedef enum efx_filter_match_flags_e { - EFX_FILTER_MATCH_REM_HOST = 0x0001, /* Match by remote IP host - * address */ - EFX_FILTER_MATCH_LOC_HOST = 0x0002, /* Match by local IP host - * address */ - EFX_FILTER_MATCH_REM_MAC = 0x0004, /* Match by remote MAC address */ - EFX_FILTER_MATCH_REM_PORT = 0x0008, /* Match by remote TCP/UDP port */ - EFX_FILTER_MATCH_LOC_MAC = 0x0010, /* Match by remote TCP/UDP port */ - EFX_FILTER_MATCH_LOC_PORT = 0x0020, /* Match by local TCP/UDP port */ - EFX_FILTER_MATCH_ETHER_TYPE = 0x0040, /* Match by Ether-type */ - EFX_FILTER_MATCH_INNER_VID = 0x0080, /* Match by inner VLAN ID */ - EFX_FILTER_MATCH_OUTER_VID = 0x0100, /* Match by outer VLAN ID */ - EFX_FILTER_MATCH_IP_PROTO = 0x0200, /* Match by IP transport - * protocol */ - /* For encapsulated packets, match all multicast inner frames */ - EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST = 0x01000000, - /* For encapsulated packets, match all unicast inner frames */ - EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST = 0x02000000, - /* Match otherwise-unmatched multicast and broadcast packets */ - EFX_FILTER_MATCH_UNKNOWN_MCAST_DST = 0x40000000, - /* Match otherwise-unmatched unicast packets */ - EFX_FILTER_MATCH_UNKNOWN_UCAST_DST = 0x80000000, -} efx_filter_match_flags_t; +/* Match by remote IP host address */ +#define EFX_FILTER_MATCH_REM_HOST 0x00000001 +/* Match by local IP host address */ +#define EFX_FILTER_MATCH_LOC_HOST 0x00000002 +/* Match by remote MAC address */ +#define EFX_FILTER_MATCH_REM_MAC 0x00000004 +/* Match by remote TCP/UDP port */ +#define EFX_FILTER_MATCH_REM_PORT 0x00000008 +/* Match by local MAC address */ +#define EFX_FILTER_MATCH_LOC_MAC 0x00000010 +/* Match by local TCP/UDP port */ +#define EFX_FILTER_MATCH_LOC_PORT 0x00000020 +/* Match by Ether-type */ +#define EFX_FILTER_MATCH_ETHER_TYPE 0x00000040 +/* Match by inner VLAN ID */ +#define EFX_FILTER_MATCH_INNER_VID 0x00000080 +/* Match by outer VLAN ID */ +#define EFX_FILTER_MATCH_OUTER_VID 0x00000100 +/* Match by IP transport protocol */ +#define EFX_FILTER_MATCH_IP_PROTO 0x00000200 +/* For encapsulated packets, match all multicast inner frames */ +#define EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST 0x01000000 +/* For encapsulated packets, match all unicast inner frames */ +#define EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST 0x02000000 +/* Match otherwise-unmatched multicast and broadcast packets */ +#define EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 0x40000000 +/* Match otherwise-unmatched unicast packets */ +#define EFX_FILTER_MATCH_UNKNOWN_UCAST_DST 0x80000000 + +typedef uint32_t efx_filter_match_flags_t; + typedef enum efx_filter_priority_s { EFX_FILTER_PRI_HINT = 0, /* Performance hint */ EFX_FILTER_PRI_AUTO, /* Automatic filter based on device * address list or hardware * requirements. This may only be used * by the filter implementation for * each NIC type. */ EFX_FILTER_PRI_MANUAL, /* Manually configured filter */ EFX_FILTER_PRI_REQUIRED, /* Required for correct behaviour of the * client (e.g. SR-IOV, HyperV VMQ etc.) */ } efx_filter_priority_t;
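A hypothetical composition example (not part of this change) may help readers map the new defines onto firmware behaviour: a filter that steers TCP/IPv4 traffic for one local address and port matches on ether type (EFX_ETHER_TYPE_IPV4), IP protocol (EFX_IPPROTO_TCP), local host and local port, as below.

	/* Sketch only; assumes "efx.h" is included. */
	static const efx_filter_match_flags_t my_local_tcp_match =
	    EFX_FILTER_MATCH_ETHER_TYPE |	/* frame carries IPv4 */
	    EFX_FILTER_MATCH_IP_PROTO |		/* IPv4 payload is TCP */
	    EFX_FILTER_MATCH_LOC_HOST |		/* one local IP address */
	    EFX_FILTER_MATCH_LOC_PORT;		/* one local TCP port */

Because the bit positions are shared with MC_CMD_FILTER_OP/MC_CMD_FILTER_OP_EXT, such a word can be handed to firmware unchanged, which also explains the gap between 0x00000200 and 0x01000000 in the sequence above.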
/* * FIXME: All these fields are assumed to be in little-endian byte order. * It may be better for some to be big-endian. See bug42804. */ typedef struct efx_filter_spec_s { - uint32_t efs_match_flags; - uint32_t efs_priority:2; - uint32_t efs_flags:6; - uint32_t efs_dmaq_id:12; - uint32_t efs_rss_context; - uint16_t efs_outer_vid; - uint16_t efs_inner_vid; - uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN]; - uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN]; - uint16_t efs_ether_type; - uint8_t efs_ip_proto; - efx_tunnel_protocol_t efs_encap_type; - uint16_t efs_loc_port; - uint16_t efs_rem_port; - efx_oword_t efs_rem_host; - efx_oword_t efs_loc_host; + efx_filter_match_flags_t efs_match_flags; + uint8_t efs_priority; + efx_filter_flags_t efs_flags; + uint16_t efs_dmaq_id; + uint32_t efs_rss_context; + uint16_t efs_outer_vid; + uint16_t efs_inner_vid; + uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN]; + uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN]; + uint16_t efs_ether_type; + uint8_t efs_ip_proto; + efx_tunnel_protocol_t efs_encap_type; + uint16_t efs_loc_port; + uint16_t efs_rem_port; + efx_oword_t efs_rem_host; + efx_oword_t efs_loc_host; } efx_filter_spec_t; /* Default values for use in filter specifications */ #define EFX_FILTER_SPEC_RX_DMAQ_ID_DROP 0xfff #define EFX_FILTER_SPEC_VID_UNSPEC 0xffff extern __checkReturn efx_rc_t efx_filter_init( __in efx_nic_t *enp); extern void efx_filter_fini( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_filter_insert( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec); extern __checkReturn efx_rc_t efx_filter_remove( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec); extern __checkReturn efx_rc_t efx_filter_restore( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_filter_supported_filters( __in efx_nic_t *enp, __out_ecount(buffer_length) uint32_t *buffer, __in size_t buffer_length, __out size_t *list_lengthp); extern void efx_filter_spec_init_rx( __out efx_filter_spec_t *spec, __in efx_filter_priority_t priority, __in efx_filter_flags_t flags, __in efx_rxq_t *erp); extern void efx_filter_spec_init_tx( __out efx_filter_spec_t *spec, __in efx_txq_t *etp); extern __checkReturn efx_rc_t efx_filter_spec_set_ipv4_local( __inout efx_filter_spec_t *spec, __in uint8_t proto, __in uint32_t host, __in uint16_t port); extern __checkReturn efx_rc_t efx_filter_spec_set_ipv4_full( __inout efx_filter_spec_t *spec, __in uint8_t proto, __in uint32_t lhost, __in uint16_t lport, __in uint32_t rhost, __in uint16_t rport); extern __checkReturn efx_rc_t efx_filter_spec_set_eth_local( __inout efx_filter_spec_t *spec, __in uint16_t vid, __in const uint8_t *addr); extern void efx_filter_spec_set_ether_type( __inout efx_filter_spec_t *spec, __in uint16_t ether_type); extern __checkReturn efx_rc_t efx_filter_spec_set_uc_def( __inout efx_filter_spec_t *spec); extern __checkReturn efx_rc_t efx_filter_spec_set_mc_def( __inout efx_filter_spec_t *spec); typedef enum efx_filter_inner_frame_match_e { EFX_FILTER_INNER_FRAME_MATCH_OTHER = 0, EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_MCAST_DST, EFX_FILTER_INNER_FRAME_MATCH_UNKNOWN_UCAST_DST } efx_filter_inner_frame_match_t; extern __checkReturn efx_rc_t efx_filter_spec_set_encap_type( __inout efx_filter_spec_t *spec, __in efx_tunnel_protocol_t encap_type, __in efx_filter_inner_frame_match_t inner_frame_match); #if EFSYS_OPT_RX_SCALE extern __checkReturn efx_rc_t efx_filter_spec_set_rss_context( __inout efx_filter_spec_t *spec, __in uint32_t rss_context); #endif #endif /* EFSYS_OPT_FILTER */ /* HASH */ extern __checkReturn uint32_t efx_hash_dwords( __in_ecount(count) uint32_t const *input, __in size_t count, __in uint32_t init); extern __checkReturn uint32_t efx_hash_bytes(
__in_ecount(length) uint8_t const *input, __in size_t length, __in uint32_t init); #if EFSYS_OPT_LICENSING /* LICENSING */ typedef struct efx_key_stats_s { uint32_t eks_valid; uint32_t eks_invalid; uint32_t eks_blacklisted; uint32_t eks_unverifiable; uint32_t eks_wrong_node; uint32_t eks_licensed_apps_lo; uint32_t eks_licensed_apps_hi; uint32_t eks_licensed_features_lo; uint32_t eks_licensed_features_hi; } efx_key_stats_t; extern __checkReturn efx_rc_t efx_lic_init( __in efx_nic_t *enp); extern void efx_lic_fini( __in efx_nic_t *enp); extern __checkReturn boolean_t efx_lic_check_support( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_lic_update_licenses( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_lic_get_key_stats( __in efx_nic_t *enp, __out efx_key_stats_t *ksp); extern __checkReturn efx_rc_t efx_lic_app_state( __in efx_nic_t *enp, __in uint64_t app_id, __out boolean_t *licensedp); extern __checkReturn efx_rc_t efx_lic_get_id( __in efx_nic_t *enp, __in size_t buffer_size, __out uint32_t *typep, __out size_t *lengthp, __out_opt uint8_t *bufferp); extern __checkReturn efx_rc_t efx_lic_find_start( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, - __out uint32_t *startp - ); + __out uint32_t *startp); extern __checkReturn efx_rc_t efx_lic_find_end( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, - __out uint32_t *endp - ); + __out uint32_t *endp); extern __checkReturn __success(return != B_FALSE) boolean_t efx_lic_find_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, - __out uint32_t *lengthp - ); + __out uint32_t *lengthp); extern __checkReturn __success(return != B_FALSE) boolean_t efx_lic_validate_key( __in efx_nic_t *enp, __in_bcount(length) caddr_t keyp, - __in uint32_t length - ); + __in uint32_t length); extern __checkReturn efx_rc_t efx_lic_read_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(key_max_size, *lengthp) caddr_t keyp, __in size_t key_max_size, - __out uint32_t *lengthp - ); + __out uint32_t *lengthp); extern __checkReturn efx_rc_t efx_lic_write_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, - __out uint32_t *lengthp - ); + __out uint32_t *lengthp); __checkReturn efx_rc_t efx_lic_delete_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end, - __out uint32_t *deltap - ); + __out uint32_t *deltap); extern __checkReturn efx_rc_t efx_lic_create_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, - __in size_t buffer_size - ); + __in size_t buffer_size); extern __checkReturn efx_rc_t efx_lic_finish_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, - __in size_t buffer_size - ); + __in size_t buffer_size); #endif /* EFSYS_OPT_LICENSING */ #ifdef __cplusplus } #endif #endif /* _SYS_EFX_H */ Index: projects/clang700-import/sys/dev/sfxge/common/efx_bootcfg.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_bootcfg.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_bootcfg.c (revision 340918) @@ -1,568 +1,568 
@@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_BOOTCFG /* * Maximum size of BOOTCFG block across all nics as understood by SFCgPXE. * NOTE: This is larger than the Medford per-PF bootcfg sector. */ #define BOOTCFG_MAX_SIZE 0x1000 /* Medford per-PF bootcfg sector */ #define BOOTCFG_PER_PF 0x800 #define BOOTCFG_PF_COUNT 16 #define DHCP_END ((uint8_t)0xff) #define DHCP_PAD ((uint8_t)0) /* Report the layout of bootcfg sectors in NVRAM partition. 
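(Annotation, not part of the diff: the per-PF layout described above is simple arithmetic over the BOOTCFG_PER_PF and BOOTCFG_PF_COUNT constants defined in this file. A minimal sketch mirroring the Medford branch of the function that follows; the helper name is hypothetical.)

/* Illustration only: 16 consecutive 2KB sectors, indexed by PF */
static size_t
medford_bootcfg_offset_example(uint32_t pf)
{
	/* e.g. PF 3's sector starts at 0x800 * 3 = 0x1800 */
	EFSYS_ASSERT(pf < BOOTCFG_PF_COUNT);
	return ((size_t)BOOTCFG_PER_PF * pf);
}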
*/ __checkReturn efx_rc_t efx_bootcfg_sector_info( __in efx_nic_t *enp, __in uint32_t pf, __out_opt uint32_t *sector_countp, __out size_t *offsetp, __out size_t *max_sizep) { uint32_t count; size_t max_size; size_t offset; int rc; switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: max_size = BOOTCFG_MAX_SIZE; offset = 0; count = 1; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: max_size = BOOTCFG_MAX_SIZE; offset = 0; count = 1; break; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: { /* Shared partition (array indexed by PF) */ max_size = BOOTCFG_PER_PF; count = BOOTCFG_PF_COUNT; if (pf >= count) { rc = EINVAL; goto fail2; } offset = max_size * pf; break; } #endif /* EFSYS_OPT_MEDFORD */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail1; } EFSYS_ASSERT3U(max_size, <=, BOOTCFG_MAX_SIZE); if (sector_countp != NULL) *sector_countp = count; *offsetp = offset; *max_sizep = max_size; return (0); #if EFSYS_OPT_MEDFORD fail2: EFSYS_PROBE(fail2); #endif fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn uint8_t efx_bootcfg_csum( __in efx_nic_t *enp, __in_bcount(size) uint8_t const *data, __in size_t size) { _NOTE(ARGUNUSED(enp)) unsigned int pos; uint8_t checksum = 0; for (pos = 0; pos < size; pos++) checksum += data[pos]; return (checksum); } static __checkReturn efx_rc_t efx_bootcfg_verify( __in efx_nic_t *enp, __in_bcount(size) uint8_t const *data, __in size_t size, __out_opt size_t *usedp) { size_t offset = 0; size_t used = 0; efx_rc_t rc; /* Start parsing tags immediately after the checksum */ for (offset = 1; offset < size; ) { uint8_t tag; uint8_t length; /* Consume tag */ tag = data[offset]; if (tag == DHCP_END) { offset++; used = offset; break; } if (tag == DHCP_PAD) { offset++; continue; } /* Consume length */ if (offset + 1 >= size) { rc = ENOSPC; goto fail1; } length = data[offset + 1]; /* Consume *length */ if (offset + 1 + length >= size) { rc = ENOSPC; goto fail2; } offset += 2 + length; used = offset; } /* Checksum the entire sector, including bytes after any DHCP_END */ if (efx_bootcfg_csum(enp, data, size) != 0) { rc = EINVAL; goto fail3; } if (usedp != NULL) *usedp = used; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Copy bootcfg sector data to a target buffer which may differ in size. * Optionally corrects format errors in source buffer. */ efx_rc_t efx_bootcfg_copy_sector( __in efx_nic_t *enp, __inout_bcount(sector_length) uint8_t *sector, __in size_t sector_length, __out_bcount(data_size) uint8_t *data, __in size_t data_size, __in boolean_t handle_format_errors) { size_t used_bytes; efx_rc_t rc; /* Verify that the area is correctly formatted and checksummed */ rc = efx_bootcfg_verify(enp, sector, sector_length, &used_bytes); if (!handle_format_errors) { if (rc != 0) goto fail1; if ((used_bytes < 2) || (sector[used_bytes - 1] != DHCP_END)) { /* Block too short, or DHCP_END missing */ rc = ENOENT; goto fail2; } } /* Synthesize empty format on verification failure */ if (rc != 0 || used_bytes == 0) { sector[0] = 0; sector[1] = DHCP_END; used_bytes = 2; } EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */ EFSYS_ASSERT(used_bytes <= sector_length); EFSYS_ASSERT(sector_length >= 2); /* * Legacy bootcfg sectors don't terminate with a DHCP_END character. * Modify the returned payload so it does. * Reinitialise the sector if there isn't room for the character. 
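(Annotation, not part of the diff: efx_bootcfg_verify() above walks a DHCP-options-style byte layout. A minimal well-formed sector, assuming one hypothetical option tag, looks like the sketch below.)

/* Byte 0 is the checksum; options are tag/length/value; DHCP_END terminates */
static uint8_t bootcfg_sector_example[] = {
	0x00,			/* checksum byte, fixed up last */
	0x12, 0x02, 'h', 'i',	/* hypothetical option: tag 0x12, length 2 */
	DHCP_END		/* 0xff terminator consumed by the verify loop */
};
/*
 * Seal it so every byte sums to zero modulo 256, which is the property
 * efx_bootcfg_csum() checks:
 *	bootcfg_sector_example[0] -= efx_bootcfg_csum(enp,
 *	    bootcfg_sector_example, sizeof (bootcfg_sector_example));
 */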
*/ if (sector[used_bytes - 1] != DHCP_END) { if (used_bytes >= sector_length) { sector[0] = 0; used_bytes = 1; } sector[used_bytes] = DHCP_END; ++used_bytes; } /* * Verify that the target buffer is large enough for the * entire used bootcfg area, then copy into the target buffer. */ if (used_bytes > data_size) { rc = ENOSPC; goto fail3; } memcpy(data, sector, used_bytes); /* Zero out the unused portion of the target buffer */ if (used_bytes < data_size) (void) memset(data + used_bytes, 0, data_size - used_bytes); /* * The checksum includes trailing data after any DHCP_END character, * which we've just modified (by truncation or appending DHCP_END). */ data[0] -= efx_bootcfg_csum(enp, data, data_size); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } efx_rc_t efx_bootcfg_read( __in efx_nic_t *enp, - __out_bcount(size) caddr_t data, + __out_bcount(size) uint8_t *data, __in size_t size) { uint8_t *payload = NULL; size_t used_bytes; size_t partn_length; size_t sector_length; size_t sector_offset; efx_rc_t rc; uint32_t sector_number; #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD sector_number = enp->en_nic_cfg.enc_pf; #else sector_number = 0; #endif rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length); if (rc != 0) goto fail1; /* The bootcfg sector may be stored in a (larger) shared partition */ rc = efx_bootcfg_sector_info(enp, sector_number, NULL, &sector_offset, &sector_length); if (rc != 0) goto fail2; if (sector_length > BOOTCFG_MAX_SIZE) sector_length = BOOTCFG_MAX_SIZE; if (sector_offset + sector_length > partn_length) { /* Partition is too small */ rc = EFBIG; goto fail3; } /* * We need to read the entire BOOTCFG sector to ensure we read all the * tags, because legacy bootcfg sectors are not guaranteed to end with * a DHCP_END character. If the user hasn't supplied a sufficiently * large buffer then use our own buffer. */ if (sector_length > size) { EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload); if (payload == NULL) { rc = ENOMEM; goto fail4; } } else payload = (uint8_t *)data; if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0) goto fail5; if ((rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, sector_offset, (caddr_t)payload, sector_length)) != 0) { (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL); goto fail6; } if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0) goto fail7; /* Verify that the area is correctly formatted and checksummed */ - rc = efx_bootcfg_verify(enp, (caddr_t)payload, sector_length, + rc = efx_bootcfg_verify(enp, payload, sector_length, &used_bytes); if (rc != 0 || used_bytes == 0) { - payload[0] = (uint8_t)~DHCP_END; + payload[0] = (uint8_t)(~DHCP_END & 0xff); payload[1] = DHCP_END; used_bytes = 2; } EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */ EFSYS_ASSERT(used_bytes <= sector_length); /* * Legacy bootcfg sectors don't terminate with a DHCP_END character. * Modify the returned payload so it does. BOOTCFG_MAX_SIZE is by * definition large enough for any valid (per-port) bootcfg sector, * so reinitialise the sector if there isn't room for the character. */ if (payload[used_bytes - 1] != DHCP_END) { if (used_bytes + 1 > sector_length) { payload[0] = 0; used_bytes = 1; } payload[used_bytes] = DHCP_END; ++used_bytes; } /* * Verify that the user supplied buffer is large enough for the * entire used bootcfg area, then copy into the user supplied buffer.
*/ if (used_bytes > size) { rc = ENOSPC; goto fail8; } if (sector_length > size) { memcpy(data, payload, used_bytes); EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload); } /* Zero out the unused portion of the user buffer */ if (used_bytes < size) (void) memset(data + used_bytes, 0, size - used_bytes); /* * The checksum includes trailing data after any DHCP_END character, * which we've just modified (by truncation or appending DHCP_END). */ data[0] -= efx_bootcfg_csum(enp, data, size); return (0); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); if (sector_length > size) EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } efx_rc_t efx_bootcfg_write( __in efx_nic_t *enp, - __in_bcount(size) caddr_t data, + __in_bcount(size) uint8_t *data, __in size_t size) { uint8_t *partn_data; uint8_t checksum; size_t partn_length; size_t sector_length; size_t sector_offset; size_t used_bytes; efx_rc_t rc; uint32_t sector_number; #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD sector_number = enp->en_nic_cfg.enc_pf; #else sector_number = 0; #endif rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length); if (rc != 0) goto fail1; /* The bootcfg sector may be stored in a (larger) shared partition */ rc = efx_bootcfg_sector_info(enp, sector_number, NULL, &sector_offset, &sector_length); if (rc != 0) goto fail2; if (sector_length > BOOTCFG_MAX_SIZE) sector_length = BOOTCFG_MAX_SIZE; if (sector_offset + sector_length > partn_length) { /* Partition is too small */ rc = EFBIG; goto fail3; } if ((rc = efx_bootcfg_verify(enp, data, size, &used_bytes)) != 0) goto fail4; /* The caller *must* terminate their block with a DHCP_END character */ if ((used_bytes < 2) || ((uint8_t)data[used_bytes - 1] != DHCP_END)) { /* Block too short or DHCP_END missing */ rc = ENOENT; goto fail5; } /* Check that the hardware has support for this much data */ if (used_bytes > MIN(sector_length, BOOTCFG_MAX_SIZE)) { rc = ENOSPC; goto fail6; } /* * If the BOOTCFG sector is stored in a shared partition, then we must * read the whole partition and insert the updated bootcfg sector at the * correct offset. */ EFSYS_KMEM_ALLOC(enp->en_esip, partn_length, partn_data); if (partn_data == NULL) { rc = ENOMEM; goto fail7; } rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL); if (rc != 0) goto fail8; /* Read the entire partition */ rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, 0, (caddr_t)partn_data, partn_length); if (rc != 0) goto fail9; /* * Insert the BOOTCFG sector into the partition, zero out all data after * the DHCP_END tag, and adjust the checksum.
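(Annotation, not part of the diff: a worked example of the checksum adjustment performed just below.)

/*
 * Suppose the caller's verified block sums to 0x37 modulo 256. After
 * the memcpy() that follows, partn_data[sector_offset] -= 0x37
 * subtracts that sum from the sector's first byte, so the stored
 * sector once again sums to zero -- exactly the invariant that
 * efx_bootcfg_verify() demands when the sector is next read back.
 */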
*/ (void) memset(partn_data + sector_offset, 0x0, sector_length); (void) memcpy(partn_data + sector_offset, data, used_bytes); checksum = efx_bootcfg_csum(enp, data, used_bytes); partn_data[sector_offset] -= checksum; if ((rc = efx_nvram_erase(enp, EFX_NVRAM_BOOTROM_CFG)) != 0) goto fail10; if ((rc = efx_nvram_write_chunk(enp, EFX_NVRAM_BOOTROM_CFG, 0, (caddr_t)partn_data, partn_length)) != 0) goto fail11; if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0) goto fail12; EFSYS_KMEM_FREE(enp->en_esip, partn_length, partn_data); return (0); fail12: EFSYS_PROBE(fail12); fail11: EFSYS_PROBE(fail11); fail10: EFSYS_PROBE(fail10); fail9: EFSYS_PROBE(fail9); (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL); fail8: EFSYS_PROBE(fail8); EFSYS_KMEM_FREE(enp->en_esip, partn_length, partn_data); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_BOOTCFG */ Index: projects/clang700-import/sys/dev/sfxge/common/efx_check.h =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_check.h (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_check.h (revision 340918) @@ -1,349 +1,349 @@ /*- * Copyright (c) 2012-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. * * $FreeBSD$ */ #ifndef _SYS_EFX_CHECK_H #define _SYS_EFX_CHECK_H #include "efsys.h" /* * Check that the efsys.h header in client code has a valid combination of * EFSYS_OPT_xxx options. * * NOTE: Keep checks for obsolete options here to ensure that they are removed * from client code (and do not reappear in merges from other branches). */ #ifdef EFSYS_OPT_FALCON # error "FALCON is obsolete and is not supported." 
#endif /* Support NVRAM based boot config */ #if EFSYS_OPT_BOOTCFG # if !EFSYS_OPT_NVRAM # error "BOOTCFG requires NVRAM" # endif #endif /* EFSYS_OPT_BOOTCFG */ /* Verify chip implements accessed registers */ #if EFSYS_OPT_CHECK_REG # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_CHECK_REG */ /* Decode fatal errors */ #if EFSYS_OPT_DECODE_INTR_FATAL # if !EFSYS_OPT_SIENA # error "INTR_FATAL requires SIENA" # endif #endif /* EFSYS_OPT_DECODE_INTR_FATAL */ /* Support diagnostic hardware tests */ #if EFSYS_OPT_DIAG # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "DIAG requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_DIAG */ /* Support optimized EVQ data access */ #if EFSYS_OPT_EV_PREFETCH # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_EV_PREFETCH */ #ifdef EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE # error "FALCON_NIC_CFG_OVERRIDE is obsolete and is not supported." #endif /* Support hardware packet filters */ #if EFSYS_OPT_FILTER # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "FILTER requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_FILTER */ #if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # if !EFSYS_OPT_FILTER # error "HUNTINGTON or MEDFORD requires FILTER" # endif #endif /* EFSYS_OPT_HUNTINGTON */ /* Support hardware loopback modes */ #if EFSYS_OPT_LOOPBACK # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_LOOPBACK */ #ifdef EFSYS_OPT_MAC_FALCON_GMAC # error "MAC_FALCON_GMAC is obsolete and is not supported." #endif #ifdef EFSYS_OPT_MAC_FALCON_XMAC # error "MAC_FALCON_XMAC is obsolete and is not supported." #endif /* Support MAC statistics */ #if EFSYS_OPT_MAC_STATS # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_MAC_STATS */ /* Support management controller messages */ #if EFSYS_OPT_MCDI # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "MCDI requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_MCDI */ #if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # if !EFSYS_OPT_MCDI # error "SIENA or HUNTINGTON or MEDFORD requires MCDI" # endif #endif /* Support MCDI logging */ #if EFSYS_OPT_MCDI_LOGGING # if !EFSYS_OPT_MCDI # error "MCDI_LOGGING requires MCDI" # endif #endif /* EFSYS_OPT_MCDI_LOGGING */ /* Support MCDI proxy authorization */ #if EFSYS_OPT_MCDI_PROXY_AUTH # if !EFSYS_OPT_MCDI # error "MCDI_PROXY_AUTH requires MCDI" # endif #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ #ifdef EFSYS_OPT_MON_LM87 # error "MON_LM87 is obsolete and is not supported." #endif #ifdef EFSYS_OPT_MON_MAX6647 # error "MON_MAX6647 is obsolete and is not supported." #endif #ifdef EFSYS_OPT_MON_NULL # error "MON_NULL is obsolete and is not supported." #endif #ifdef EFSYS_OPT_MON_SIENA # error "MON_SIENA is obsolete (replaced by MON_MCDI)." #endif #ifdef EFSYS_OPT_MON_HUNTINGTON # error "MON_HUNTINGTON is obsolete (replaced by MON_MCDI)." 
#endif /* Support monitor statistics (voltage/temperature) */ #if EFSYS_OPT_MON_STATS # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_MON_STATS */ /* Support Monitor via mcdi */ #if EFSYS_OPT_MON_MCDI # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_MON_MCDI*/ /* Support printable names for statistics */ #if EFSYS_OPT_NAMES # if !(EFSYS_OPT_LOOPBACK || EFSYS_OPT_MAC_STATS || EFSYS_OPT_MCDI || \ EFSYS_MON_STATS || EFSYS_OPT_PHY_STATS || EFSYS_OPT_QSTATS) # error "NAMES requires LOOPBACK or xxxSTATS or MCDI" # endif #endif /* EFSYS_OPT_NAMES */ /* Support non volatile configuration */ #if EFSYS_OPT_NVRAM # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "NVRAM requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_NVRAM */ #ifdef EFSYS_OPT_NVRAM_FALCON_BOOTROM # error "NVRAM_FALCON_BOOTROM is obsolete and is not supported." #endif #ifdef EFSYS_OPT_NVRAM_SFT9001 # error "NVRAM_SFT9001 is obsolete and is not supported." #endif #ifdef EFSYS_OPT_NVRAM_SFX7101 # error "NVRAM_SFX7101 is obsolete and is not supported." #endif #ifdef EFSYS_OPT_PCIE_TUNE # error "PCIE_TUNE is obsolete and is not supported." #endif #ifdef EFSYS_OPT_PHY_BIST # error "PHY_BIST is obsolete (replaced by BIST)." #endif /* Support PHY flags */ #if EFSYS_OPT_PHY_FLAGS # if !EFSYS_OPT_SIENA # error "PHY_FLAGS requires SIENA" # endif #endif /* EFSYS_OPT_PHY_FLAGS */ /* Support for PHY LED control */ #if EFSYS_OPT_PHY_LED_CONTROL -# if !EFSYS_OPT_SIENA -# error "PHY_LED_CONTROL requires SIENA" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) +# error "PHY_LED_CONTROL requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_PHY_LED_CONTROL */ #ifdef EFSYS_OPT_PHY_NULL # error "PHY_NULL is obsolete and is not supported." #endif #ifdef EFSYS_OPT_PHY_PM8358 # error "PHY_PM8358 is obsolete and is not supported." #endif #ifdef EFSYS_OPT_PHY_PROPS # error "PHY_PROPS is obsolete and is not supported." #endif #ifdef EFSYS_OPT_PHY_QT2022C2 # error "PHY_QT2022C2 is obsolete and is not supported." #endif #ifdef EFSYS_OPT_PHY_QT2025C # error "PHY_QT2025C is obsolete and is not supported." #endif #ifdef EFSYS_OPT_PHY_SFT9001 # error "PHY_SFT9001 is obsolete and is not supported." #endif #ifdef EFSYS_OPT_PHY_SFX7101 # error "PHY_SFX7101 is obsolete and is not supported." #endif /* Support PHY statistics */ #if EFSYS_OPT_PHY_STATS -# if !EFSYS_OPT_SIENA -# error "PHY_STATS requires SIENA" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) +# error "PHY_STATS requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_PHY_STATS */ #ifdef EFSYS_OPT_PHY_TXC43128 # error "PHY_TXC43128 is obsolete and is not supported." 
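(Annotation, not part of the diff: to make the dependency rules in this header concrete, a hypothetical client efsys.h fragment that passes the checks could select the following; each comment maps a line to one of the rules above.)

#define EFSYS_OPT_SIENA 0
#define EFSYS_OPT_HUNTINGTON 0
#define EFSYS_OPT_MEDFORD 1	/* one supported target family */
#define EFSYS_OPT_MCDI 1	/* any of the three families requires MCDI */
#define EFSYS_OPT_FILTER 1	/* HUNTINGTON or MEDFORD requires FILTER */
#define EFSYS_OPT_NVRAM 1	/* NVRAM requires a supported family */
#define EFSYS_OPT_BOOTCFG 1	/* BOOTCFG requires NVRAM */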
#endif /* Support EVQ/RXQ/TXQ statistics */ #if EFSYS_OPT_QSTATS # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "QSTATS requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_QSTATS */ #ifdef EFSYS_OPT_RX_HDR_SPLIT # error "RX_HDR_SPLIT is obsolete and is not supported" #endif /* Support receive scaling (RSS) */ #if EFSYS_OPT_RX_SCALE # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_RX_SCALE */ /* Support receive scatter DMA */ #if EFSYS_OPT_RX_SCATTER # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_RX_SCATTER */ #ifdef EFSYS_OPT_STAT_NAME # error "STAT_NAME is obsolete (replaced by NAMES)." #endif /* Support PCI Vital Product Data (VPD) */ #if EFSYS_OPT_VPD # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "VPD requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_VPD */ /* Support Wake on LAN */ #ifdef EFSYS_OPT_WOL # error "WOL is obsolete and is not supported" #endif /* EFSYS_OPT_WOL */ #ifdef EFSYS_OPT_MCAST_FILTER_LIST # error "MCAST_FILTER_LIST is obsolete and is not supported" #endif /* Support BIST */ #if EFSYS_OPT_BIST # if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "BIST requires SIENA or HUNTINGTON or MEDFORD" # endif #endif /* EFSYS_OPT_BIST */ /* Support MCDI licensing API */ #if EFSYS_OPT_LICENSING # if !EFSYS_OPT_MCDI # error "LICENSING requires MCDI" # endif # if !EFSYS_HAS_UINT64 # error "LICENSING requires UINT64" # endif #endif /* EFSYS_OPT_LICENSING */ /* Support adapters with missing static config (for factory use only) */ #if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC # if !EFSYS_OPT_MEDFORD # error "ALLOW_UNCONFIGURED_NIC requires MEDFORD" # endif #endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ /* Support packed stream mode */ #if EFSYS_OPT_RX_PACKED_STREAM # if !(EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) # error "PACKED_STREAM requires HUNTINGTON or MEDFORD" # endif #endif #endif /* _SYS_EFX_CHECK_H */ Index: projects/clang700-import/sys/dev/sfxge/common/efx_ev.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_ev.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_ev.c (revision 340918) @@ -1,1475 +1,1478 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_MON_MCDI #include "mcdi_mon.h" #endif #if EFSYS_OPT_QSTATS #define EFX_EV_QSTAT_INCR(_eep, _stat) \ do { \ (_eep)->ee_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_EV_QSTAT_INCR(_eep, _stat) #endif #define EFX_EV_PRESENT(_qword) \ (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \ EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff) #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t siena_ev_init( __in efx_nic_t *enp); static void siena_ev_fini( __in efx_nic_t *enp); static __checkReturn efx_rc_t siena_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, __in uint32_t us, __in uint32_t flags, __in efx_evq_t *eep); static void siena_ev_qdestroy( __in efx_evq_t *eep); static __checkReturn efx_rc_t siena_ev_qprime( __in efx_evq_t *eep, __in unsigned int count); static void siena_ev_qpost( __in efx_evq_t *eep, __in uint16_t data); static __checkReturn efx_rc_t siena_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us); #if EFSYS_OPT_QSTATS static void siena_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); #endif #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA static const efx_ev_ops_t __efx_ev_siena_ops = { siena_ev_init, /* eevo_init */ siena_ev_fini, /* eevo_fini */ siena_ev_qcreate, /* eevo_qcreate */ siena_ev_qdestroy, /* eevo_qdestroy */ siena_ev_qprime, /* eevo_qprime */ siena_ev_qpost, /* eevo_qpost */ siena_ev_qmoderate, /* eevo_qmoderate */ #if EFSYS_OPT_QSTATS siena_ev_qstats_update, /* eevo_qstats_update */ #endif }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD static const efx_ev_ops_t __efx_ev_ef10_ops = { ef10_ev_init, /* eevo_init */ ef10_ev_fini, /* eevo_fini */ ef10_ev_qcreate, /* eevo_qcreate */ ef10_ev_qdestroy, /* eevo_qdestroy */ ef10_ev_qprime, /* eevo_qprime */ ef10_ev_qpost, /* eevo_qpost */ ef10_ev_qmoderate, /* eevo_qmoderate */ #if EFSYS_OPT_QSTATS ef10_ev_qstats_update, /* eevo_qstats_update */ #endif }; #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_ev_init( __in efx_nic_t *enp) { const efx_ev_ops_t *eevop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); if (enp->en_mod_flags & EFX_MOD_EV) { rc = EINVAL; goto fail1; } switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: eevop = &__efx_ev_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: eevop = &__efx_ev_ef10_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: eevop = &__efx_ev_ef10_ops; 
break; #endif /* EFSYS_OPT_MEDFORD */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail1; } EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0); if ((rc = eevop->eevo_init(enp)) != 0) goto fail2; enp->en_eevop = eevop; enp->en_mod_flags |= EFX_MOD_EV; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_eevop = NULL; enp->en_mod_flags &= ~EFX_MOD_EV; return (rc); } void efx_ev_fini( __in efx_nic_t *enp) { const efx_ev_ops_t *eevop = enp->en_eevop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0); eevop->eevo_fini(enp); enp->en_eevop = NULL; enp->en_mod_flags &= ~EFX_MOD_EV; } __checkReturn efx_rc_t efx_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, __in uint32_t us, __in uint32_t flags, __deref_out efx_evq_t **eepp) { const efx_ev_ops_t *eevop = enp->en_eevop; - efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_evq_t *eep; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV); - EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit); + EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, + enp->en_nic_cfg.enc_evq_limit); switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) { case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT: break; case EFX_EVQ_FLAGS_NOTIFY_DISABLED: if (us != 0) { rc = EINVAL; goto fail1; } break; default: rc = EINVAL; goto fail2; } /* Allocate an EVQ object */ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep); if (eep == NULL) { rc = ENOMEM; goto fail3; } eep->ee_magic = EFX_EVQ_MAGIC; eep->ee_enp = enp; eep->ee_index = index; - eep->ee_mask = n - 1; + eep->ee_mask = ndescs - 1; eep->ee_flags = flags; eep->ee_esmp = esmp; /* * Set outputs before the queue is created because interrupts may be * raised for events immediately after the queue is created, before the * function call below returns. See bug58606. * * The eepp pointer passed in by the client must therefore point to data * shared with the client's event processing context. 
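(Annotation, not part of the diff: the comment above describes a deliberate publish-before-create ordering, which the stores just below implement.)

/*
 * en_ev_qcount and *eepp are stored before eevo_qcreate() so that an
 * interrupt raised the moment the hardware queue comes to life finds
 * valid state; the fail4 path unwinds both stores if creation fails.
 */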
*/ enp->en_ev_qcount++; *eepp = eep; - if ((rc = eevop->eevo_qcreate(enp, index, esmp, n, id, us, flags, + if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags, eep)) != 0) goto fail4; return (0); fail4: EFSYS_PROBE(fail4); *eepp = NULL; enp->en_ev_qcount--; EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_ev_qdestroy( __in efx_evq_t *eep) { efx_nic_t *enp = eep->ee_enp; const efx_ev_ops_t *eevop = enp->en_eevop; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); EFSYS_ASSERT(enp->en_ev_qcount != 0); --enp->en_ev_qcount; eevop->eevo_qdestroy(eep); /* Free the EVQ object */ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep); } __checkReturn efx_rc_t efx_ev_qprime( __in efx_evq_t *eep, __in unsigned int count) { efx_nic_t *enp = eep->ee_enp; const efx_ev_ops_t *eevop = enp->en_eevop; efx_rc_t rc; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); if (!(enp->en_mod_flags & EFX_MOD_INTR)) { rc = EINVAL; goto fail1; } if ((rc = eevop->eevo_qprime(eep, count)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn boolean_t efx_ev_qpending( __in efx_evq_t *eep, __in unsigned int count) { size_t offset; efx_qword_t qword; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword); return (EFX_EV_PRESENT(qword)); } #if EFSYS_OPT_EV_PREFETCH void efx_ev_qprefetch( __in efx_evq_t *eep, __in unsigned int count) { unsigned int offset; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); } #endif /* EFSYS_OPT_EV_PREFETCH */ #define EFX_EV_BATCH 8 void efx_ev_qpoll( __in efx_evq_t *eep, __inout unsigned int *countp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_qword_t ev[EFX_EV_BATCH]; unsigned int batch; unsigned int total; unsigned int count; unsigned int index; size_t offset; /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */ EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN); EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH); EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV); EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV); EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV); EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV == FSE_AZ_EV_CODE_DRV_GEN_EV); #if EFSYS_OPT_MCDI EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV == FSE_AZ_EV_CODE_MCDI_EVRESPONSE); #endif EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); EFSYS_ASSERT(countp != NULL); EFSYS_ASSERT(eecp != NULL); count = *countp; do { /* Read up until the end of the batch period */ batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1)); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); for (total = 0; total < batch; ++total) { EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total])); if (!EFX_EV_PRESENT(ev[total])) break; EFSYS_PROBE3(event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0)); offset += sizeof (efx_qword_t); } #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1) /* * Prefetch the next batch when we get within PREFETCH_PERIOD * of a completed batch. If the batch is smaller, then prefetch * immediately. 
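(Annotation, not part of the diff: a sketch of the ring indexing used by efx_ev_qpending(), efx_ev_qprefetch() and efx_ev_qpoll() above; the helper name is hypothetical.)

/*
 * Queue sizes are powers of two, so ee_mask = ndescs - 1 turns the
 * monotonically increasing event count into a ring slot without a
 * modulo.  Empty slots read as all-ones, which is why EFX_EV_PRESENT()
 * tests both dwords against 0xffffffff and qpoll() writes all-ones
 * back over consumed entries.
 */
static size_t
evq_ring_offset_example(unsigned int count, uint32_t mask)
{
	/* e.g. ndescs = 1024, mask = 0x3ff: count 1030 -> slot 6 */
	return ((size_t)(count & mask) * sizeof (efx_qword_t));
}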
*/ if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD) EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); #endif /* EFSYS_OPT_EV_PREFETCH */ /* Process the batch of events */ for (index = 0; index < total; ++index) { boolean_t should_abort; uint32_t code; #if EFSYS_OPT_EV_PREFETCH /* Prefetch if we've now reached the batch period */ if (total == batch && index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) { offset = (count + batch) & eep->ee_mask; offset *= sizeof (efx_qword_t); EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); } #endif /* EFSYS_OPT_EV_PREFETCH */ EFX_EV_QSTAT_INCR(eep, EV_ALL); code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE); switch (code) { case FSE_AZ_EV_CODE_RX_EV: should_abort = eep->ee_rx(eep, &(ev[index]), eecp, arg); break; case FSE_AZ_EV_CODE_TX_EV: should_abort = eep->ee_tx(eep, &(ev[index]), eecp, arg); break; case FSE_AZ_EV_CODE_DRIVER_EV: should_abort = eep->ee_driver(eep, &(ev[index]), eecp, arg); break; case FSE_AZ_EV_CODE_DRV_GEN_EV: should_abort = eep->ee_drv_gen(eep, &(ev[index]), eecp, arg); break; #if EFSYS_OPT_MCDI case FSE_AZ_EV_CODE_MCDI_EVRESPONSE: should_abort = eep->ee_mcdi(eep, &(ev[index]), eecp, arg); break; #endif case FSE_AZ_EV_CODE_GLOBAL_EV: if (eep->ee_global) { should_abort = eep->ee_global(eep, &(ev[index]), eecp, arg); break; } /* else fallthrough */ default: EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(ev[index], EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(ev[index], EFX_DWORD_0)); EFSYS_ASSERT(eecp->eec_exception != NULL); (void) eecp->eec_exception(arg, EFX_EXCEPTION_EV_ERROR, code); should_abort = B_TRUE; } if (should_abort) { /* Ignore subsequent events */ total = index + 1; break; } } /* * Now that the hardware has most likely moved onto dma'ing * into the next cache line, clear the processed events. 
Take * care to only clear out events that we've processed */ EFX_SET_QWORD(ev[0]); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); for (index = 0; index < total; ++index) { EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0])); offset += sizeof (efx_qword_t); } count += total; } while (total == batch); *countp = count; } void efx_ev_qpost( __in efx_evq_t *eep, __in uint16_t data) { efx_nic_t *enp = eep->ee_enp; const efx_ev_ops_t *eevop = enp->en_eevop; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); EFSYS_ASSERT(eevop != NULL && eevop->eevo_qpost != NULL); eevop->eevo_qpost(eep, data); } __checkReturn efx_rc_t efx_ev_usecs_to_ticks( __in efx_nic_t *enp, __in unsigned int us, __out unsigned int *ticksp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); unsigned int ticks; /* Convert microseconds to a timer tick count */ if (us == 0) ticks = 0; else if (us * 1000 < encp->enc_evq_timer_quantum_ns) ticks = 1; /* Never round down to zero */ else ticks = us * 1000 / encp->enc_evq_timer_quantum_ns; *ticksp = ticks; return (0); } __checkReturn efx_rc_t efx_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us) { efx_nic_t *enp = eep->ee_enp; const efx_ev_ops_t *eevop = enp->en_eevop; efx_rc_t rc; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == EFX_EVQ_FLAGS_NOTIFY_DISABLED) { rc = EINVAL; goto fail1; } if ((rc = eevop->eevo_qmoderate(eep, us)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_QSTATS void efx_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) { efx_nic_t *enp = eep->ee_enp; const efx_ev_ops_t *eevop = enp->en_eevop; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); eevop->eevo_qstats_update(eep, stat); } #endif /* EFSYS_OPT_QSTATS */ #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t siena_ev_init( __in efx_nic_t *enp) { efx_oword_t oword; /* * Program the event queue for receive and transmit queue * flush events. */ EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0); EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword); return (0); } static __checkReturn boolean_t siena_ev_rx_not_ok( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in uint32_t label, __in uint32_t id, __inout uint16_t *flagsp) { boolean_t ignore = B_FALSE; if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC); EFSYS_PROBE(tobe_disc); /* * Assume this is a unicast address mismatch, unless below * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or * EV_RX_PAUSE_FRM_ERR is set. */ (*flagsp) |= EFX_ADDR_MISMATCH; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) { EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id); EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); (*flagsp) |= EFX_DISCARD; #if EFSYS_OPT_RX_SCATTER /* * Lookout for payload queue ran dry errors and ignore them. * * Sadly for the header/data split cases, the descriptor * pointer in this event refers to the header queue and * therefore cannot be easily detected as duplicate. * So we drop these and rely on the receive processing seeing * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard * the partially received packet. 
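(Annotation, not part of the diff: stepping back to efx_ev_usecs_to_ticks() above, a worked example of the conversion. The 6144ns quantum is hypothetical; the real value comes from enc_evq_timer_quantum_ns.)

/*
 * With a 6144ns timer quantum: us = 50 -> 50000 / 6144 = 8 ticks,
 * while any non-zero request below one quantum (us = 5 -> 5000ns)
 * still yields 1 tick, since the conversion never rounds down to zero.
 */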
*/ if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) && (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) && (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0)) ignore = B_TRUE; #endif /* EFSYS_OPT_RX_SCATTER */ } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); EFSYS_PROBE(crc_err); (*flagsp) &= ~EFX_ADDR_MISMATCH; (*flagsp) |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR); EFSYS_PROBE(pause_frm_err); (*flagsp) &= ~EFX_ADDR_MISMATCH; (*flagsp) |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR); EFSYS_PROBE(owner_id_err); (*flagsp) |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); EFSYS_PROBE(ipv4_err); (*flagsp) &= ~EFX_CKSUM_IPV4; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); EFSYS_PROBE(udp_chk_err); (*flagsp) &= ~EFX_CKSUM_TCPUDP; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR); /* * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error * condition. */ (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP); } return (ignore); } static __checkReturn boolean_t siena_ev_rx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t id; uint32_t size; uint32_t label; boolean_t ok; #if EFSYS_OPT_RX_SCATTER boolean_t sop; boolean_t jumbo_cont; #endif /* EFSYS_OPT_RX_SCATTER */ uint32_t hdr_type; boolean_t is_v6; uint16_t flags; boolean_t ignore; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_RX); /* Basic packet information */ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR); size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT); label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL); ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0); #if EFSYS_OPT_RX_SCATTER sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0); jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0); #endif /* EFSYS_OPT_RX_SCATTER */ hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE); is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0); /* * If packet is marked as OK and packet type is TCP/IP or * UDP/IP or other IP, then we can rely on the hardware checksums. 
*/ switch (hdr_type) { case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP: flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP; if (is_v6) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); flags |= EFX_PKT_IPV6; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4; } break; case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP: flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP; if (is_v6) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); flags |= EFX_PKT_IPV6; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4; } break; case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: if (is_v6) { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6); flags = EFX_PKT_IPV6; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4); flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4; } break; case FSE_AZ_RX_EV_HDR_TYPE_OTHER: EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP); flags = 0; break; default: EFSYS_ASSERT(B_FALSE); flags = 0; break; } #if EFSYS_OPT_RX_SCATTER /* Report scatter and header/lookahead split buffer flags */ if (sop) flags |= EFX_PKT_START; if (jumbo_cont) flags |= EFX_PKT_CONT; #endif /* EFSYS_OPT_RX_SCATTER */ /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */ if (!ok) { ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags); if (ignore) { EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id, uint32_t, size, uint16_t, flags); return (B_FALSE); } } /* If we're not discarding the packet then it is ok */ if (~flags & EFX_DISCARD) EFX_EV_QSTAT_INCR(eep, EV_RX_OK); /* Detect multicast packets that didn't match the filter */ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH); } else { EFSYS_PROBE(mcast_mismatch); flags |= EFX_ADDR_MISMATCH; } } else { flags |= EFX_PKT_UNICAST; } /* * The packet parser in Siena can abort parsing packets under * certain error conditions, setting the PKT_NOT_PARSED bit * (which clears PKT_OK). If this is set, then don't trust * the PKT_TYPE field. 
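(Annotation, not part of the diff: a summary of the hdr_type-to-flags mapping implemented by the switch near the top of siena_ev_rx() above.)

/*
 *	IPV4V6_TCP   -> EFX_PKT_TCP | EFX_CKSUM_TCPUDP (+ IPv4/IPv6 flag)
 *	IPV4V6_UDP   -> EFX_PKT_UDP | EFX_CKSUM_TCPUDP (+ IPv4/IPv6 flag)
 *	IPV4V6_OTHER -> IP flags only, no TCP/UDP checksum claim
 *	OTHER        -> 0 (non-IP)
 * IPv4 cases also set EFX_CKSUM_IPV4; siena_ev_rx_not_ok() clears the
 * checksum flags again if the hardware reported an error.
 */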
*/ if (!ok) { uint32_t parse_err; parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED); if (parse_err != 0) flags |= EFX_CHECK_VLAN; } if (~flags & EFX_CHECK_VLAN) { uint32_t pkt_type; pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE); if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN) flags |= EFX_PKT_VLAN_TAGGED; } EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id, uint32_t, size, uint16_t, flags); EFSYS_ASSERT(eecp->eec_rx != NULL); should_abort = eecp->eec_rx(arg, label, id, size, flags); return (should_abort); } static __checkReturn boolean_t siena_ev_tx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t id; uint32_t label; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_TX); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 && EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 && EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 && EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) { id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR); label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL); EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); EFSYS_ASSERT(eecp->eec_tx != NULL); should_abort = eecp->eec_tx(arg, label, id); return (should_abort); } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0) EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0) EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0) EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0) EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL); EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED); return (B_FALSE); } static __checkReturn boolean_t siena_ev_global( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { _NOTE(ARGUNUSED(eqp, eecp, arg)) EFX_EV_QSTAT_INCR(eep, EV_GLOBAL); return (B_FALSE); } static __checkReturn boolean_t siena_ev_driver( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRIVER); should_abort = B_FALSE; switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) { case FSE_AZ_TX_DESCQ_FLS_DONE_EV: { uint32_t txq_index; EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); should_abort = eecp->eec_txq_flush_done(arg, txq_index); break; } case FSE_AZ_RX_DESCQ_FLS_DONE_EV: { uint32_t rxq_index; uint32_t failed; rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL); if (failed) { EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED); EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index); should_abort = eecp->eec_rxq_flush_failed(arg, rxq_index); } else { EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE); EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); should_abort = eecp->eec_rxq_flush_done(arg, rxq_index); } break; } case FSE_AZ_EVQ_INIT_DONE_EV: EFSYS_ASSERT(eecp->eec_initialized != NULL); should_abort = eecp->eec_initialized(arg); break; case FSE_AZ_EVQ_NOT_EN_EV: EFSYS_PROBE(evq_not_en); break; case 
FSE_AZ_SRM_UPD_DONE_EV: { uint32_t code; EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE); code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_ASSERT(eecp->eec_sram != NULL); should_abort = eecp->eec_sram(arg, code); break; } case FSE_AZ_WAKE_UP_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_ASSERT(eecp->eec_wake_up != NULL); should_abort = eecp->eec_wake_up(arg, id); break; } case FSE_AZ_TX_PKT_NON_TCP_UDP: EFSYS_PROBE(tx_pkt_non_tcp_udp); break; case FSE_AZ_TIMER_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_ASSERT(eecp->eec_timer != NULL); should_abort = eecp->eec_timer(arg, id); break; } case FSE_AZ_RX_DSC_ERROR_EV: EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR); EFSYS_PROBE(rx_dsc_error); EFSYS_ASSERT(eecp->eec_exception != NULL); should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_RX_DSC_ERROR, 0); break; case FSE_AZ_TX_DSC_ERROR_EV: EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR); EFSYS_PROBE(tx_dsc_error); EFSYS_ASSERT(eecp->eec_exception != NULL); should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_TX_DSC_ERROR, 0); break; default: break; } return (should_abort); } static __checkReturn boolean_t siena_ev_drv_gen( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t data; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN); data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0); if (data >= ((uint32_t)1 << 16)) { EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); return (B_TRUE); } EFSYS_ASSERT(eecp->eec_software != NULL); should_abort = eecp->eec_software(arg, (uint16_t)data); return (should_abort); } #if EFSYS_OPT_MCDI static __checkReturn boolean_t siena_ev_mcdi( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; unsigned int code; boolean_t should_abort = B_FALSE; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); if (enp->en_family != EFX_FAMILY_SIENA) goto out; EFSYS_ASSERT(eecp->eec_link_change != NULL); EFSYS_ASSERT(eecp->eec_exception != NULL); #if EFSYS_OPT_MON_STATS EFSYS_ASSERT(eecp->eec_monitor != NULL); #endif EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE); code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE); switch (code) { case MCDI_EVENT_CODE_BADSSERT: efx_mcdi_ev_death(enp, EINTR); break; case MCDI_EVENT_CODE_CMDDONE: efx_mcdi_ev_cpl(enp, MCDI_EV_FIELD(eqp, CMDDONE_SEQ), MCDI_EV_FIELD(eqp, CMDDONE_DATALEN), MCDI_EV_FIELD(eqp, CMDDONE_ERRNO)); break; case MCDI_EVENT_CODE_LINKCHANGE: { efx_link_mode_t link_mode; siena_phy_link_ev(enp, eqp, &link_mode); should_abort = eecp->eec_link_change(arg, link_mode); break; } case MCDI_EVENT_CODE_SENSOREVT: { #if EFSYS_OPT_MON_STATS efx_mon_stat_t id; efx_mon_stat_value_t value; efx_rc_t rc; if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) should_abort = eecp->eec_monitor(arg, id, value); else if (rc == ENOTSUP) { should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_SENSOREVT, MCDI_EV_FIELD(eqp, DATA)); } else EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ #else should_abort = B_FALSE; #endif break; } case MCDI_EVENT_CODE_SCHEDERR: /* Informational only */ break; case MCDI_EVENT_CODE_REBOOT: efx_mcdi_ev_death(enp, EIO); break; case MCDI_EVENT_CODE_MAC_STATS_DMA: #if EFSYS_OPT_MAC_STATS if (eecp->eec_mac_stats != NULL) { eecp->eec_mac_stats(arg, MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); } #endif break; case 
MCDI_EVENT_CODE_FWALERT: { uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_FWALERT_SRAM, MCDI_EV_FIELD(eqp, FWALERT_DATA)); else should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_FWALERT, MCDI_EV_FIELD(eqp, DATA)); break; } default: EFSYS_PROBE1(mc_pcol_error, int, code); break; } out: return (should_abort); } #endif /* EFSYS_OPT_MCDI */ static __checkReturn efx_rc_t siena_ev_qprime( __in efx_evq_t *eep, __in unsigned int count) { efx_nic_t *enp = eep->ee_enp; uint32_t rptr; efx_dword_t dword; rptr = count & eep->ee_mask; EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr); EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index, &dword, B_FALSE); return (0); } static void siena_ev_qpost( __in efx_evq_t *eep, __in uint16_t data) { efx_nic_t *enp = eep->ee_enp; efx_qword_t ev; efx_oword_t oword; EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV, FSF_AZ_EV_DATA_DW0, (uint32_t)data); EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index, EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0), EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1)); EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword); } static __checkReturn efx_rc_t siena_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us) { efx_nic_t *enp = eep->ee_enp; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); unsigned int locked; efx_dword_t dword; efx_rc_t rc; if (us > encp->enc_evq_timer_max_us) { rc = EINVAL; goto fail1; } /* If the value is zero then disable the timer */ if (us == 0) { EFX_POPULATE_DWORD_2(dword, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS, FRF_CZ_TC_TIMER_VAL, 0); } else { unsigned int ticks; if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) goto fail2; EFSYS_ASSERT(ticks > 0); EFX_POPULATE_DWORD_2(dword, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF, FRF_CZ_TC_TIMER_VAL, ticks - 1); } locked = (eep->ee_index == 0) ? 
1 : 0; EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0, eep->ee_index, &dword, locked); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t siena_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, __in uint32_t us, __in uint32_t flags, __in efx_evq_t *eep) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t size; efx_oword_t oword; efx_rc_t rc; boolean_t notify_mode; _NOTE(ARGUNUSED(esmp)) EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS)); EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS)); - if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) { + if (!ISP2(ndescs) || + (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_evq_limit) { rc = EINVAL; goto fail2; } #if EFSYS_OPT_RX_SCALE if (enp->en_intr.ei_type == EFX_INTR_LINE && index >= EFX_MAXRSS_LEGACY) { rc = EINVAL; goto fail3; } #endif for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS); size++) - if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS)) + if ((1 << size) == (int)(ndescs / EFX_EVQ_MINNEVS)) break; if (id + (1 << size) >= encp->enc_buftbl_limit) { rc = EINVAL; goto fail4; } /* Set up the handler table */ eep->ee_rx = siena_ev_rx; eep->ee_tx = siena_ev_tx; eep->ee_driver = siena_ev_driver; eep->ee_global = siena_ev_global; eep->ee_drv_gen = siena_ev_drv_gen; #if EFSYS_OPT_MCDI eep->ee_mcdi = siena_ev_mcdi; #endif /* EFSYS_OPT_MCDI */ notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) != EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); /* Set up the new event queue */ EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1, FRF_CZ_HOST_NOTIFY_MODE, notify_mode, FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE); EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size, FRF_AZ_EVQ_BUF_BASE_ID, id); EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE); /* Set initial interrupt moderation */ siena_ev_qmoderate(eep, us); return (0); fail4: EFSYS_PROBE(fail4); #if EFSYS_OPT_RX_SCALE fail3: EFSYS_PROBE(fail3); #endif fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock c0f3bc5083b40532 */ static const char * const __efx_ev_qstat_name[] = { "all", "rx", "rx_ok", "rx_frm_trunc", "rx_tobe_disc", "rx_pause_frm_err", "rx_buf_owner_id_err", "rx_ipv4_hdr_chksum_err", "rx_tcp_udp_chksum_err", "rx_eth_crc_err", "rx_ip_frag_err", "rx_mcast_pkt", "rx_mcast_hash_match", "rx_tcp_ipv4", "rx_tcp_ipv6", "rx_udp_ipv4", "rx_udp_ipv6", "rx_other_ipv4", "rx_other_ipv6", "rx_non_ip", "rx_batch", "tx", "tx_wq_ff_full", "tx_pkt_err", "tx_pkt_too_big", "tx_unexpected", "global", "global_mnt", "driver", "driver_srm_upd_done", "driver_tx_descq_fls_done", "driver_rx_descq_fls_done", "driver_rx_descq_fls_failed", "driver_rx_dsc_error", "driver_tx_dsc_error", "drv_gen", "mcdi_response", }; /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */ const char * efx_ev_qstat_name( __in efx_nic_t *enp, __in unsigned int id) { + _NOTE(ARGUNUSED(enp)) + EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(id, <, EV_NQSTATS); return (__efx_ev_qstat_name[id]); } #endif /* EFSYS_OPT_NAMES */ #endif /* EFSYS_OPT_QSTATS */ #if EFSYS_OPT_SIENA #if EFSYS_OPT_QSTATS static void siena_ev_qstats_update( __in efx_evq_t *eep, 
__inout_ecount(EV_NQSTATS) efsys_stat_t *stat) { unsigned int id; for (id = 0; id < EV_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, eep->ee_stat[id]); eep->ee_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ static void siena_ev_qdestroy( __in efx_evq_t *eep) { efx_nic_t *enp = eep->ee_enp; efx_oword_t oword; /* Purge event queue */ EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, eep->ee_index, &oword, B_TRUE); EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE); } static void siena_ev_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } #endif /* EFSYS_OPT_SIENA */ Index: projects/clang700-import/sys/dev/sfxge/common/efx_impl.h =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_impl.h (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_impl.h (revision 340918) @@ -1,1218 +1,1182 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
* * $FreeBSD$ */ #ifndef _SYS_EFX_IMPL_H #define _SYS_EFX_IMPL_H #include "efx.h" #include "efx_regs.h" #include "efx_regs_ef10.h" /* FIXME: Add definition for driver generated software events */ #ifndef ESE_DZ_EV_CODE_DRV_GEN_EV #define ESE_DZ_EV_CODE_DRV_GEN_EV FSE_AZ_EV_CODE_DRV_GEN_EV #endif #if EFSYS_OPT_SIENA #include "siena_impl.h" #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON #include "hunt_impl.h" #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD #include "medford_impl.h" #endif /* EFSYS_OPT_MEDFORD */ #if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) #include "ef10_impl.h" #endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */ #ifdef __cplusplus extern "C" { #endif #define EFX_MOD_MCDI 0x00000001 #define EFX_MOD_PROBE 0x00000002 #define EFX_MOD_NVRAM 0x00000004 #define EFX_MOD_VPD 0x00000008 #define EFX_MOD_NIC 0x00000010 #define EFX_MOD_INTR 0x00000020 #define EFX_MOD_EV 0x00000040 #define EFX_MOD_RX 0x00000080 #define EFX_MOD_TX 0x00000100 #define EFX_MOD_PORT 0x00000200 #define EFX_MOD_MON 0x00000400 #define EFX_MOD_FILTER 0x00001000 #define EFX_MOD_LIC 0x00002000 #define EFX_RESET_PHY 0x00000001 #define EFX_RESET_RXQ_ERR 0x00000002 #define EFX_RESET_TXQ_ERR 0x00000004 typedef enum efx_mac_type_e { EFX_MAC_INVALID = 0, EFX_MAC_SIENA, EFX_MAC_HUNTINGTON, EFX_MAC_MEDFORD, EFX_MAC_NTYPES } efx_mac_type_t; typedef struct efx_ev_ops_s { efx_rc_t (*eevo_init)(efx_nic_t *); void (*eevo_fini)(efx_nic_t *); efx_rc_t (*eevo_qcreate)(efx_nic_t *, unsigned int, efsys_mem_t *, size_t, uint32_t, uint32_t, uint32_t, efx_evq_t *); void (*eevo_qdestroy)(efx_evq_t *); efx_rc_t (*eevo_qprime)(efx_evq_t *, unsigned int); void (*eevo_qpost)(efx_evq_t *, uint16_t); efx_rc_t (*eevo_qmoderate)(efx_evq_t *, unsigned int); #if EFSYS_OPT_QSTATS void (*eevo_qstats_update)(efx_evq_t *, efsys_stat_t *); #endif } efx_ev_ops_t; typedef struct efx_tx_ops_s { efx_rc_t (*etxo_init)(efx_nic_t *); void (*etxo_fini)(efx_nic_t *); efx_rc_t (*etxo_qcreate)(efx_nic_t *, unsigned int, unsigned int, efsys_mem_t *, size_t, uint32_t, uint16_t, efx_evq_t *, efx_txq_t *, unsigned int *); void (*etxo_qdestroy)(efx_txq_t *); efx_rc_t (*etxo_qpost)(efx_txq_t *, efx_buffer_t *, unsigned int, unsigned int, unsigned int *); void (*etxo_qpush)(efx_txq_t *, unsigned int, unsigned int); efx_rc_t (*etxo_qpace)(efx_txq_t *, unsigned int); efx_rc_t (*etxo_qflush)(efx_txq_t *); void (*etxo_qenable)(efx_txq_t *); efx_rc_t (*etxo_qpio_enable)(efx_txq_t *); void (*etxo_qpio_disable)(efx_txq_t *); efx_rc_t (*etxo_qpio_write)(efx_txq_t *, uint8_t *, size_t, size_t); efx_rc_t (*etxo_qpio_post)(efx_txq_t *, size_t, unsigned int, unsigned int *); efx_rc_t (*etxo_qdesc_post)(efx_txq_t *, efx_desc_t *, unsigned int, unsigned int, unsigned int *); void (*etxo_qdesc_dma_create)(efx_txq_t *, efsys_dma_addr_t, size_t, boolean_t, efx_desc_t *); void (*etxo_qdesc_tso_create)(efx_txq_t *, uint16_t, uint32_t, uint8_t, efx_desc_t *); void (*etxo_qdesc_tso2_create)(efx_txq_t *, uint16_t, uint32_t, uint16_t, efx_desc_t *, int); void (*etxo_qdesc_vlantci_create)(efx_txq_t *, uint16_t, efx_desc_t *); #if EFSYS_OPT_QSTATS void (*etxo_qstats_update)(efx_txq_t *, efsys_stat_t *); #endif } efx_tx_ops_t; typedef struct efx_rx_ops_s { efx_rc_t (*erxo_init)(efx_nic_t *); void (*erxo_fini)(efx_nic_t *); #if EFSYS_OPT_RX_SCATTER efx_rc_t (*erxo_scatter_enable)(efx_nic_t *, unsigned int); #endif #if EFSYS_OPT_RX_SCALE efx_rc_t (*erxo_scale_context_alloc)(efx_nic_t *, efx_rx_scale_context_type_t, uint32_t, uint32_t *); efx_rc_t 
(*erxo_scale_context_free)(efx_nic_t *, uint32_t); efx_rc_t (*erxo_scale_mode_set)(efx_nic_t *, uint32_t, efx_rx_hash_alg_t, efx_rx_hash_type_t, boolean_t); efx_rc_t (*erxo_scale_key_set)(efx_nic_t *, uint32_t, uint8_t *, size_t); efx_rc_t (*erxo_scale_tbl_set)(efx_nic_t *, uint32_t, unsigned int *, size_t); uint32_t (*erxo_prefix_hash)(efx_nic_t *, efx_rx_hash_alg_t, uint8_t *); #endif /* EFSYS_OPT_RX_SCALE */ efx_rc_t (*erxo_prefix_pktlen)(efx_nic_t *, uint8_t *, uint16_t *); void (*erxo_qpost)(efx_rxq_t *, efsys_dma_addr_t *, size_t, unsigned int, unsigned int, unsigned int); void (*erxo_qpush)(efx_rxq_t *, unsigned int, unsigned int *); #if EFSYS_OPT_RX_PACKED_STREAM void (*erxo_qpush_ps_credits)(efx_rxq_t *); uint8_t * (*erxo_qps_packet_info)(efx_rxq_t *, uint8_t *, uint32_t, uint32_t, uint16_t *, uint32_t *, uint32_t *); #endif efx_rc_t (*erxo_qflush)(efx_rxq_t *); void (*erxo_qenable)(efx_rxq_t *); efx_rc_t (*erxo_qcreate)(efx_nic_t *enp, unsigned int, - unsigned int, efx_rxq_type_t, + unsigned int, efx_rxq_type_t, uint32_t, efsys_mem_t *, size_t, uint32_t, + unsigned int, efx_evq_t *, efx_rxq_t *); void (*erxo_qdestroy)(efx_rxq_t *); } efx_rx_ops_t; typedef struct efx_mac_ops_s { efx_rc_t (*emo_poll)(efx_nic_t *, efx_link_mode_t *); efx_rc_t (*emo_up)(efx_nic_t *, boolean_t *); efx_rc_t (*emo_addr_set)(efx_nic_t *); efx_rc_t (*emo_pdu_set)(efx_nic_t *); efx_rc_t (*emo_pdu_get)(efx_nic_t *, size_t *); efx_rc_t (*emo_reconfigure)(efx_nic_t *); efx_rc_t (*emo_multicast_list_set)(efx_nic_t *); efx_rc_t (*emo_filter_default_rxq_set)(efx_nic_t *, efx_rxq_t *, boolean_t); void (*emo_filter_default_rxq_clear)(efx_nic_t *); #if EFSYS_OPT_LOOPBACK efx_rc_t (*emo_loopback_set)(efx_nic_t *, efx_link_mode_t, efx_loopback_type_t); #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS efx_rc_t (*emo_stats_get_mask)(efx_nic_t *, uint32_t *, size_t); efx_rc_t (*emo_stats_clear)(efx_nic_t *); efx_rc_t (*emo_stats_upload)(efx_nic_t *, efsys_mem_t *); efx_rc_t (*emo_stats_periodic)(efx_nic_t *, efsys_mem_t *, uint16_t, boolean_t); efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *, efsys_stat_t *, uint32_t *); #endif /* EFSYS_OPT_MAC_STATS */ } efx_mac_ops_t; typedef struct efx_phy_ops_s { efx_rc_t (*epo_power)(efx_nic_t *, boolean_t); /* optional */ efx_rc_t (*epo_reset)(efx_nic_t *); efx_rc_t (*epo_reconfigure)(efx_nic_t *); efx_rc_t (*epo_verify)(efx_nic_t *); efx_rc_t (*epo_oui_get)(efx_nic_t *, uint32_t *); #if EFSYS_OPT_PHY_STATS efx_rc_t (*epo_stats_update)(efx_nic_t *, efsys_mem_t *, uint32_t *); #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_BIST efx_rc_t (*epo_bist_enable_offline)(efx_nic_t *); efx_rc_t (*epo_bist_start)(efx_nic_t *, efx_bist_type_t); efx_rc_t (*epo_bist_poll)(efx_nic_t *, efx_bist_type_t, efx_bist_result_t *, uint32_t *, unsigned long *, size_t); void (*epo_bist_stop)(efx_nic_t *, efx_bist_type_t); #endif /* EFSYS_OPT_BIST */ } efx_phy_ops_t; #if EFSYS_OPT_FILTER typedef struct efx_filter_ops_s { efx_rc_t (*efo_init)(efx_nic_t *); void (*efo_fini)(efx_nic_t *); efx_rc_t (*efo_restore)(efx_nic_t *); efx_rc_t (*efo_add)(efx_nic_t *, efx_filter_spec_t *, boolean_t may_replace); efx_rc_t (*efo_delete)(efx_nic_t *, efx_filter_spec_t *); efx_rc_t (*efo_supported_filters)(efx_nic_t *, uint32_t *, size_t, size_t *); efx_rc_t (*efo_reconfigure)(efx_nic_t *, uint8_t const *, boolean_t, boolean_t, boolean_t, boolean_t, uint8_t const *, uint32_t); } efx_filter_ops_t; extern __checkReturn efx_rc_t efx_filter_reconfigure( __in efx_nic_t *enp, __in_ecount(6) uint8_t const 
*mac_addr, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst, __in_ecount(6*count) uint8_t const *addrs, __in uint32_t count); #endif /* EFSYS_OPT_FILTER */ typedef struct efx_port_s { efx_mac_type_t ep_mac_type; uint32_t ep_phy_type; uint8_t ep_port; uint32_t ep_mac_pdu; uint8_t ep_mac_addr[6]; efx_link_mode_t ep_link_mode; boolean_t ep_all_unicst; boolean_t ep_mulcst; boolean_t ep_all_mulcst; boolean_t ep_brdcst; unsigned int ep_fcntl; boolean_t ep_fcntl_autoneg; efx_oword_t ep_multicst_hash[2]; uint8_t ep_mulcst_addr_list[EFX_MAC_ADDR_LEN * EFX_MAC_MULTICAST_LIST_MAX]; uint32_t ep_mulcst_addr_count; #if EFSYS_OPT_LOOPBACK efx_loopback_type_t ep_loopback_type; efx_link_mode_t ep_loopback_link_mode; #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_PHY_FLAGS uint32_t ep_phy_flags; #endif /* EFSYS_OPT_PHY_FLAGS */ #if EFSYS_OPT_PHY_LED_CONTROL efx_phy_led_mode_t ep_phy_led_mode; #endif /* EFSYS_OPT_PHY_LED_CONTROL */ efx_phy_media_type_t ep_fixed_port_type; efx_phy_media_type_t ep_module_type; uint32_t ep_adv_cap_mask; uint32_t ep_lp_cap_mask; uint32_t ep_default_adv_cap_mask; uint32_t ep_phy_cap_mask; boolean_t ep_mac_drain; - boolean_t ep_mac_stats_pending; #if EFSYS_OPT_BIST efx_bist_type_t ep_current_bist; #endif const efx_mac_ops_t *ep_emop; const efx_phy_ops_t *ep_epop; } efx_port_t; typedef struct efx_mon_ops_s { #if EFSYS_OPT_MON_STATS efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *, efx_mon_stat_value_t *); #endif /* EFSYS_OPT_MON_STATS */ } efx_mon_ops_t; typedef struct efx_mon_s { efx_mon_type_t em_type; const efx_mon_ops_t *em_emop; } efx_mon_t; typedef struct efx_intr_ops_s { efx_rc_t (*eio_init)(efx_nic_t *, efx_intr_type_t, efsys_mem_t *); void (*eio_enable)(efx_nic_t *); void (*eio_disable)(efx_nic_t *); void (*eio_disable_unlocked)(efx_nic_t *); efx_rc_t (*eio_trigger)(efx_nic_t *, unsigned int); void (*eio_status_line)(efx_nic_t *, boolean_t *, uint32_t *); void (*eio_status_message)(efx_nic_t *, unsigned int, boolean_t *); void (*eio_fatal)(efx_nic_t *); void (*eio_fini)(efx_nic_t *); } efx_intr_ops_t; typedef struct efx_intr_s { const efx_intr_ops_t *ei_eiop; efsys_mem_t *ei_esmp; efx_intr_type_t ei_type; unsigned int ei_level; } efx_intr_t; typedef struct efx_nic_ops_s { efx_rc_t (*eno_probe)(efx_nic_t *); efx_rc_t (*eno_board_cfg)(efx_nic_t *); efx_rc_t (*eno_set_drv_limits)(efx_nic_t *, efx_drv_limits_t*); efx_rc_t (*eno_reset)(efx_nic_t *); efx_rc_t (*eno_init)(efx_nic_t *); efx_rc_t (*eno_get_vi_pool)(efx_nic_t *, uint32_t *); efx_rc_t (*eno_get_bar_region)(efx_nic_t *, efx_nic_region_t, uint32_t *, size_t *); #if EFSYS_OPT_DIAG efx_rc_t (*eno_register_test)(efx_nic_t *); #endif /* EFSYS_OPT_DIAG */ void (*eno_fini)(efx_nic_t *); void (*eno_unprobe)(efx_nic_t *); } efx_nic_ops_t; #ifndef EFX_TXQ_LIMIT_TARGET #define EFX_TXQ_LIMIT_TARGET 259 #endif #ifndef EFX_RXQ_LIMIT_TARGET #define EFX_RXQ_LIMIT_TARGET 512 #endif -#ifndef EFX_TXQ_DC_SIZE -#define EFX_TXQ_DC_SIZE 1 /* 16 descriptors */ -#endif -#ifndef EFX_RXQ_DC_SIZE -#define EFX_RXQ_DC_SIZE 3 /* 64 descriptors */ -#endif + #if EFSYS_OPT_FILTER typedef struct siena_filter_spec_s { uint8_t sfs_type; uint32_t sfs_flags; uint32_t sfs_dmaq_id; uint32_t sfs_dword[3]; } siena_filter_spec_t; typedef enum siena_filter_type_e { EFX_SIENA_FILTER_RX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */ EFX_SIENA_FILTER_RX_TCP_WILD, /* TCP/IPv4 {dIP,dTCP, -, -} */ EFX_SIENA_FILTER_RX_UDP_FULL, /* UDP/IPv4 {dIP,dUDP,sIP,sUDP} */ EFX_SIENA_FILTER_RX_UDP_WILD, /* UDP/IPv4 {dIP,dUDP, 
-, -} */ EFX_SIENA_FILTER_RX_MAC_FULL, /* Ethernet {dMAC,VLAN} */ EFX_SIENA_FILTER_RX_MAC_WILD, /* Ethernet {dMAC, -} */ EFX_SIENA_FILTER_TX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */ EFX_SIENA_FILTER_TX_TCP_WILD, /* TCP/IPv4 { -, -,sIP,sTCP} */ EFX_SIENA_FILTER_TX_UDP_FULL, /* UDP/IPv4 {dIP,dUDP,sIP,sUDP} */ EFX_SIENA_FILTER_TX_UDP_WILD, /* UDP/IPv4 { -, -,sIP,sUDP} */ EFX_SIENA_FILTER_TX_MAC_FULL, /* Ethernet {sMAC,VLAN} */ EFX_SIENA_FILTER_TX_MAC_WILD, /* Ethernet {sMAC, -} */ EFX_SIENA_FILTER_NTYPES } siena_filter_type_t; typedef enum siena_filter_tbl_id_e { EFX_SIENA_FILTER_TBL_RX_IP = 0, EFX_SIENA_FILTER_TBL_RX_MAC, EFX_SIENA_FILTER_TBL_TX_IP, EFX_SIENA_FILTER_TBL_TX_MAC, EFX_SIENA_FILTER_NTBLS } siena_filter_tbl_id_t; typedef struct siena_filter_tbl_s { int sft_size; /* number of entries */ int sft_used; /* active count */ uint32_t *sft_bitmap; /* active bitmap */ siena_filter_spec_t *sft_spec; /* array of saved specs */ } siena_filter_tbl_t; typedef struct siena_filter_s { siena_filter_tbl_t sf_tbl[EFX_SIENA_FILTER_NTBLS]; unsigned int sf_depth[EFX_SIENA_FILTER_NTYPES]; } siena_filter_t; typedef struct efx_filter_s { #if EFSYS_OPT_SIENA siena_filter_t *ef_siena_filter; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD ef10_filter_table_t *ef_ef10_filter_table; #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ } efx_filter_t; extern void siena_filter_tbl_clear( __in efx_nic_t *enp, __in siena_filter_tbl_id_t tbl); #endif /* EFSYS_OPT_FILTER */ #if EFSYS_OPT_MCDI typedef struct efx_mcdi_ops_s { efx_rc_t (*emco_init)(efx_nic_t *, const efx_mcdi_transport_t *); void (*emco_send_request)(efx_nic_t *, void *, size_t, void *, size_t); efx_rc_t (*emco_poll_reboot)(efx_nic_t *); boolean_t (*emco_poll_response)(efx_nic_t *); void (*emco_read_response)(efx_nic_t *, void *, size_t, size_t); void (*emco_fini)(efx_nic_t *); efx_rc_t (*emco_feature_supported)(efx_nic_t *, efx_mcdi_feature_id_t, boolean_t *); void (*emco_get_timeout)(efx_nic_t *, efx_mcdi_req_t *, uint32_t *); } efx_mcdi_ops_t; typedef struct efx_mcdi_s { const efx_mcdi_ops_t *em_emcop; const efx_mcdi_transport_t *em_emtp; efx_mcdi_iface_t em_emip; } efx_mcdi_t; #endif /* EFSYS_OPT_MCDI */ #if EFSYS_OPT_NVRAM /* Invalid partition ID for en_nvram_partn_locked field of efx_nic_t */ #define EFX_NVRAM_PARTN_INVALID (0xffffffffu) typedef struct efx_nvram_ops_s { #if EFSYS_OPT_DIAG efx_rc_t (*envo_test)(efx_nic_t *); #endif /* EFSYS_OPT_DIAG */ efx_rc_t (*envo_type_to_partn)(efx_nic_t *, efx_nvram_type_t, uint32_t *); efx_rc_t (*envo_partn_size)(efx_nic_t *, uint32_t, size_t *); efx_rc_t (*envo_partn_rw_start)(efx_nic_t *, uint32_t, size_t *); efx_rc_t (*envo_partn_read)(efx_nic_t *, uint32_t, unsigned int, caddr_t, size_t); efx_rc_t (*envo_partn_read_backup)(efx_nic_t *, uint32_t, unsigned int, caddr_t, size_t); efx_rc_t (*envo_partn_erase)(efx_nic_t *, uint32_t, unsigned int, size_t); efx_rc_t (*envo_partn_write)(efx_nic_t *, uint32_t, unsigned int, caddr_t, size_t); efx_rc_t (*envo_partn_rw_finish)(efx_nic_t *, uint32_t, uint32_t *); efx_rc_t (*envo_partn_get_version)(efx_nic_t *, uint32_t, uint32_t *, uint16_t *); efx_rc_t (*envo_partn_set_version)(efx_nic_t *, uint32_t, uint16_t *); efx_rc_t (*envo_buffer_validate)(efx_nic_t *, uint32_t, caddr_t, size_t); } efx_nvram_ops_t; #endif /* EFSYS_OPT_NVRAM */ #if EFSYS_OPT_VPD typedef struct efx_vpd_ops_s { efx_rc_t (*evpdo_init)(efx_nic_t *); efx_rc_t (*evpdo_size)(efx_nic_t *, size_t *); efx_rc_t (*evpdo_read)(efx_nic_t *, caddr_t, size_t); efx_rc_t 
(*evpdo_verify)(efx_nic_t *, caddr_t, size_t); efx_rc_t (*evpdo_reinit)(efx_nic_t *, caddr_t, size_t); efx_rc_t (*evpdo_get)(efx_nic_t *, caddr_t, size_t, efx_vpd_value_t *); efx_rc_t (*evpdo_set)(efx_nic_t *, caddr_t, size_t, efx_vpd_value_t *); efx_rc_t (*evpdo_next)(efx_nic_t *, caddr_t, size_t, efx_vpd_value_t *, unsigned int *); efx_rc_t (*evpdo_write)(efx_nic_t *, caddr_t, size_t); void (*evpdo_fini)(efx_nic_t *); } efx_vpd_ops_t; #endif /* EFSYS_OPT_VPD */ #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM __checkReturn efx_rc_t efx_mcdi_nvram_partitions( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size, __out unsigned int *npartnp); __checkReturn efx_rc_t efx_mcdi_nvram_metadata( __in efx_nic_t *enp, __in uint32_t partn, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4], __out_bcount_opt(size) char *descp, __in size_t size); __checkReturn efx_rc_t efx_mcdi_nvram_info( __in efx_nic_t *enp, __in uint32_t partn, __out_opt size_t *sizep, __out_opt uint32_t *addressp, __out_opt uint32_t *erase_sizep, __out_opt uint32_t *write_sizep); __checkReturn efx_rc_t efx_mcdi_nvram_update_start( __in efx_nic_t *enp, __in uint32_t partn); __checkReturn efx_rc_t efx_mcdi_nvram_read( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t offset, __out_bcount(size) caddr_t data, __in size_t size, __in uint32_t mode); __checkReturn efx_rc_t efx_mcdi_nvram_erase( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t offset, __in size_t size); __checkReturn efx_rc_t efx_mcdi_nvram_write( __in efx_nic_t *enp, __in uint32_t partn, __in uint32_t offset, __out_bcount(size) caddr_t data, __in size_t size); __checkReturn efx_rc_t efx_mcdi_nvram_update_finish( __in efx_nic_t *enp, __in uint32_t partn, __in boolean_t reboot, __out_opt uint32_t *verify_resultp); #if EFSYS_OPT_DIAG __checkReturn efx_rc_t efx_mcdi_nvram_test( __in efx_nic_t *enp, __in uint32_t partn); #endif /* EFSYS_OPT_DIAG */ #endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ #if EFSYS_OPT_LICENSING typedef struct efx_lic_ops_s { efx_rc_t (*elo_update_licenses)(efx_nic_t *); efx_rc_t (*elo_get_key_stats)(efx_nic_t *, efx_key_stats_t *); efx_rc_t (*elo_app_state)(efx_nic_t *, uint64_t, boolean_t *); efx_rc_t (*elo_get_id)(efx_nic_t *, size_t, uint32_t *, size_t *, uint8_t *); efx_rc_t (*elo_find_start) (efx_nic_t *, caddr_t, size_t, uint32_t *); efx_rc_t (*elo_find_end)(efx_nic_t *, caddr_t, size_t, uint32_t, uint32_t *); boolean_t (*elo_find_key)(efx_nic_t *, caddr_t, size_t, uint32_t, uint32_t *, uint32_t *); boolean_t (*elo_validate_key)(efx_nic_t *, caddr_t, uint32_t); efx_rc_t (*elo_read_key)(efx_nic_t *, caddr_t, size_t, uint32_t, uint32_t, caddr_t, size_t, uint32_t *); efx_rc_t (*elo_write_key)(efx_nic_t *, caddr_t, size_t, uint32_t, caddr_t, uint32_t, uint32_t *); efx_rc_t (*elo_delete_key)(efx_nic_t *, caddr_t, size_t, uint32_t, uint32_t, uint32_t, uint32_t *); efx_rc_t (*elo_create_partition)(efx_nic_t *, caddr_t, size_t); efx_rc_t (*elo_finish_partition)(efx_nic_t *, caddr_t, size_t); } efx_lic_ops_t; #endif typedef struct efx_drv_cfg_s { uint32_t edc_min_vi_count; uint32_t edc_max_vi_count; uint32_t edc_max_piobuf_count; uint32_t edc_pio_alloc_size; } efx_drv_cfg_t; struct efx_nic_s { uint32_t en_magic; efx_family_t en_family; uint32_t en_features; efsys_identifier_t *en_esip; efsys_lock_t *en_eslp; efsys_bar_t *en_esbp; unsigned int en_mod_flags; unsigned int en_reset_flags; efx_nic_cfg_t en_nic_cfg; efx_drv_cfg_t en_drv_cfg; efx_port_t en_port; efx_mon_t en_mon; efx_intr_t en_intr; uint32_t en_ev_qcount; uint32_t 
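/*
 * The MCDI NVRAM helpers declared above are used as a transaction; a
 * minimal sketch of rewriting a partition (partn, data, size and
 * partn_size are hypothetical caller-side values):
 *
 *	if (efx_mcdi_nvram_update_start(enp, partn) == 0) {
 *		rc = efx_mcdi_nvram_erase(enp, partn, 0, partn_size);
 *		if (rc == 0)
 *			rc = efx_mcdi_nvram_write(enp, partn, 0,
 *			    data, size);
 *		(void) efx_mcdi_nvram_update_finish(enp, partn,
 *		    B_FALSE, NULL);
 *	}
 */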
en_rx_qcount; uint32_t en_tx_qcount; const efx_nic_ops_t *en_enop; const efx_ev_ops_t *en_eevop; const efx_tx_ops_t *en_etxop; const efx_rx_ops_t *en_erxop; #if EFSYS_OPT_FILTER efx_filter_t en_filter; const efx_filter_ops_t *en_efop; #endif /* EFSYS_OPT_FILTER */ #if EFSYS_OPT_MCDI efx_mcdi_t en_mcdi; #endif /* EFSYS_OPT_MCDI */ #if EFSYS_OPT_NVRAM uint32_t en_nvram_partn_locked; const efx_nvram_ops_t *en_envop; #endif /* EFSYS_OPT_NVRAM */ #if EFSYS_OPT_VPD const efx_vpd_ops_t *en_evpdop; #endif /* EFSYS_OPT_VPD */ #if EFSYS_OPT_RX_SCALE efx_rx_hash_support_t en_hash_support; efx_rx_scale_context_type_t en_rss_context_type; uint32_t en_rss_context; #endif /* EFSYS_OPT_RX_SCALE */ uint32_t en_vport_id; #if EFSYS_OPT_LICENSING const efx_lic_ops_t *en_elop; boolean_t en_licensing_supported; #endif union { #if EFSYS_OPT_SIENA struct { #if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD unsigned int enu_partn_mask; #endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */ #if EFSYS_OPT_VPD caddr_t enu_svpd; size_t enu_svpd_length; #endif /* EFSYS_OPT_VPD */ int enu_unused; } siena; #endif /* EFSYS_OPT_SIENA */ int enu_unused; } en_u; #if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) union en_arch { struct { int ena_vi_base; int ena_vi_count; int ena_vi_shift; #if EFSYS_OPT_VPD caddr_t ena_svpd; size_t ena_svpd_length; #endif /* EFSYS_OPT_VPD */ efx_piobuf_handle_t ena_piobuf_handle[EF10_MAX_PIOBUF_NBUFS]; uint32_t ena_piobuf_count; uint32_t ena_pio_alloc_map[EF10_MAX_PIOBUF_NBUFS]; uint32_t ena_pio_write_vi_base; /* Memory BAR mapping regions */ uint32_t ena_uc_mem_map_offset; size_t ena_uc_mem_map_size; uint32_t ena_wc_mem_map_offset; size_t ena_wc_mem_map_size; } ef10; } en_arch; #endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */ }; #define EFX_NIC_MAGIC 0x02121996 typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *, const efx_ev_callbacks_t *, void *); typedef struct efx_evq_rxq_state_s { unsigned int eers_rx_read_ptr; unsigned int eers_rx_mask; #if EFSYS_OPT_RX_PACKED_STREAM unsigned int eers_rx_stream_npackets; boolean_t eers_rx_packed_stream; unsigned int eers_rx_packed_stream_credits; #endif } efx_evq_rxq_state_t; struct efx_evq_s { uint32_t ee_magic; efx_nic_t *ee_enp; unsigned int ee_index; unsigned int ee_mask; efsys_mem_t *ee_esmp; #if EFSYS_OPT_QSTATS uint32_t ee_stat[EV_NQSTATS]; #endif /* EFSYS_OPT_QSTATS */ efx_ev_handler_t ee_rx; efx_ev_handler_t ee_tx; efx_ev_handler_t ee_driver; efx_ev_handler_t ee_global; efx_ev_handler_t ee_drv_gen; #if EFSYS_OPT_MCDI efx_ev_handler_t ee_mcdi; #endif /* EFSYS_OPT_MCDI */ efx_evq_rxq_state_t ee_rxq_state[EFX_EV_RX_NLABELS]; uint32_t ee_flags; }; #define EFX_EVQ_MAGIC 0x08081997 #define EFX_EVQ_SIENA_TIMER_QUANTUM_NS 6144 /* 768 cycles */ struct efx_rxq_s { uint32_t er_magic; efx_nic_t *er_enp; efx_evq_t *er_eep; unsigned int er_index; unsigned int er_label; unsigned int er_mask; efsys_mem_t *er_esmp; efx_evq_rxq_state_t *er_ev_qstate; }; #define EFX_RXQ_MAGIC 0x15022005 struct efx_txq_s { uint32_t et_magic; efx_nic_t *et_enp; unsigned int et_index; unsigned int et_mask; efsys_mem_t *et_esmp; #if EFSYS_OPT_HUNTINGTON uint32_t et_pio_bufnum; uint32_t et_pio_blknum; uint32_t et_pio_write_offset; uint32_t et_pio_offset; size_t et_pio_size; #endif #if EFSYS_OPT_QSTATS uint32_t et_stat[TX_NQSTATS]; #endif /* EFSYS_OPT_QSTATS */ }; #define EFX_TXQ_MAGIC 0x05092005 #define EFX_MAC_ADDR_COPY(_dst, _src) \ do { \ (_dst)[0] = (_src)[0]; \ (_dst)[1] = (_src)[1]; \ (_dst)[2] = (_src)[2]; \ (_dst)[3] = (_src)[3]; \ (_dst)[4] = (_src)[4]; \ (_dst)[5] = (_src)[5]; \ 
_NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_MAC_BROADCAST_ADDR_SET(_dst) \ do { \ uint16_t *_d = (uint16_t *)(_dst); \ _d[0] = 0xffff; \ _d[1] = 0xffff; \ _d[2] = 0xffff; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #if EFSYS_OPT_CHECK_REG #define EFX_CHECK_REG(_enp, _reg) \ do { \ const char *name = #_reg; \ char min = name[4]; \ char max = name[5]; \ char rev; \ \ switch ((_enp)->en_family) { \ case EFX_FAMILY_SIENA: \ rev = 'C'; \ break; \ \ case EFX_FAMILY_HUNTINGTON: \ rev = 'D'; \ break; \ \ case EFX_FAMILY_MEDFORD: \ rev = 'E'; \ break; \ \ default: \ rev = '?'; \ break; \ } \ \ EFSYS_ASSERT3S(rev, >=, min); \ EFSYS_ASSERT3S(rev, <=, max); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_CHECK_REG(_enp, _reg) do { \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #endif #define EFX_BAR_READD(_enp, _reg, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_BAR_READD((_enp)->en_esbp, _reg ## _OFST, \ (_edp), (_lock)); \ EFSYS_PROBE3(efx_bar_readd, const char *, #_reg, \ uint32_t, _reg ## _OFST, \ uint32_t, (_edp)->ed_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_WRITED(_enp, _reg, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE3(efx_bar_writed, const char *, #_reg, \ uint32_t, _reg ## _OFST, \ uint32_t, (_edp)->ed_u32[0]); \ EFSYS_BAR_WRITED((_enp)->en_esbp, _reg ## _OFST, \ (_edp), (_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_READQ(_enp, _reg, _eqp) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_BAR_READQ((_enp)->en_esbp, _reg ## _OFST, \ (_eqp)); \ EFSYS_PROBE4(efx_bar_readq, const char *, #_reg, \ uint32_t, _reg ## _OFST, \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_WRITEQ(_enp, _reg, _eqp) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE4(efx_bar_writeq, const char *, #_reg, \ uint32_t, _reg ## _OFST, \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ EFSYS_BAR_WRITEQ((_enp)->en_esbp, _reg ## _OFST, \ (_eqp)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_READO(_enp, _reg, _eop) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_BAR_READO((_enp)->en_esbp, _reg ## _OFST, \ (_eop), B_TRUE); \ EFSYS_PROBE6(efx_bar_reado, const char *, #_reg, \ uint32_t, _reg ## _OFST, \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_WRITEO(_enp, _reg, _eop) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE6(efx_bar_writeo, const char *, #_reg, \ uint32_t, _reg ## _OFST, \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ EFSYS_BAR_WRITEO((_enp)->en_esbp, _reg ## _OFST, \ (_eop), B_TRUE); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_BAR_READD((_enp)->en_esbp, \ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_edp), (_lock)); \ EFSYS_PROBE4(efx_bar_tbl_readd, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_edp)->ed_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_WRITED(_enp, _reg, _index, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_edp)->ed_u32[0]); \ 
EFSYS_BAR_WRITED((_enp)->en_esbp, \ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_edp), (_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_WRITED2(_enp, _reg, _index, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_edp)->ed_u32[0]); \ EFSYS_BAR_WRITED((_enp)->en_esbp, \ (_reg ## _OFST + \ (2 * sizeof (efx_dword_t)) + \ ((_index) * _reg ## _STEP)), \ (_edp), (_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_edp)->ed_u32[0]); \ EFSYS_BAR_WRITED((_enp)->en_esbp, \ (_reg ## _OFST + \ (3 * sizeof (efx_dword_t)) + \ ((_index) * _reg ## _STEP)), \ (_edp), (_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_READQ(_enp, _reg, _index, _eqp) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_BAR_READQ((_enp)->en_esbp, \ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_eqp)); \ EFSYS_PROBE5(efx_bar_tbl_readq, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_WRITEQ(_enp, _reg, _index, _eqp) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE5(efx_bar_tbl_writeq, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ EFSYS_BAR_WRITEQ((_enp)->en_esbp, \ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_eqp)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_READO(_enp, _reg, _index, _eop, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_BAR_READO((_enp)->en_esbp, \ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_eop), (_lock)); \ EFSYS_PROBE7(efx_bar_tbl_reado, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_BAR_TBL_WRITEO(_enp, _reg, _index, _eop, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE7(efx_bar_tbl_writeo, const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ EFSYS_BAR_WRITEO((_enp)->en_esbp, \ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_eop), (_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* * Allow drivers to perform optimised 128-bit doorbell writes. * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are * special-cased in the BIU on the Falcon/Siena and EF10 architectures to avoid * the need for locking in the host, and are the only ones known to be safe to * use 128-bit writes with. 
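 *
 * As an illustration only (register and field names assumed from
 * efx_regs_ef10.h, not introduced by this change), an EF10 TX doorbell
 * push through the macro below would be:
 *
 *	efx_oword_t oword;
 *	EFX_POPULATE_OWORD_1(oword, ERF_DZ_TX_DESC_WPTR, wptr);
 *	EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
 *	    etp->et_index, &oword);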
*/ #define EFX_BAR_TBL_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ EFSYS_PROBE7(efx_bar_tbl_doorbell_writeo, \ - const char *, \ - #_reg, \ + const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ EFSYS_BAR_DOORBELL_WRITEO((_enp)->en_esbp, \ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ (_eop)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_DMA_SYNC_QUEUE_FOR_DEVICE(_esmp, _entries, _wptr, _owptr) \ do { \ unsigned int _new = (_wptr); \ unsigned int _old = (_owptr); \ \ if ((_new) >= (_old)) \ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \ (_old) * sizeof (efx_desc_t), \ ((_new) - (_old)) * sizeof (efx_desc_t)); \ else \ /* \ * It is cheaper to sync entire map than sync \ * two parts especially when offset/size are \ * ignored and entire map is synced in any case.\ */ \ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \ 0, \ (_entries) * sizeof (efx_desc_t)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) extern __checkReturn efx_rc_t -efx_nic_biu_test( - __in efx_nic_t *enp); - -extern __checkReturn efx_rc_t efx_mac_select( __in efx_nic_t *enp); extern void efx_mac_multicast_hash_compute( __in_ecount(6*count) uint8_t const *addrs, __in int count, __out efx_oword_t *hash_low, __out efx_oword_t *hash_high); extern __checkReturn efx_rc_t efx_phy_probe( __in efx_nic_t *enp); extern void efx_phy_unprobe( __in efx_nic_t *enp); #if EFSYS_OPT_VPD /* VPD utility functions */ extern __checkReturn efx_rc_t efx_vpd_hunk_length( __in_bcount(size) caddr_t data, __in size_t size, __out size_t *lengthp); extern __checkReturn efx_rc_t efx_vpd_hunk_verify( __in_bcount(size) caddr_t data, __in size_t size, __out_opt boolean_t *cksummedp); extern __checkReturn efx_rc_t efx_vpd_hunk_reinit( __in_bcount(size) caddr_t data, __in size_t size, __in boolean_t wantpid); extern __checkReturn efx_rc_t efx_vpd_hunk_get( __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_tag_t tag, __in efx_vpd_keyword_t keyword, __out unsigned int *payloadp, __out uint8_t *paylenp); extern __checkReturn efx_rc_t efx_vpd_hunk_next( __in_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_tag_t *tagp, __out efx_vpd_keyword_t *keyword, __out_opt unsigned int *payloadp, __out_opt uint8_t *paylenp, __inout unsigned int *contp); extern __checkReturn efx_rc_t efx_vpd_hunk_set( __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp); #endif /* EFSYS_OPT_VPD */ - -#if EFSYS_OPT_DIAG - -extern efx_sram_pattern_fn_t __efx_sram_pattern_fns[]; - -typedef struct efx_register_set_s { - unsigned int address; - unsigned int step; - unsigned int rows; - efx_oword_t mask; -} efx_register_set_t; - -extern __checkReturn efx_rc_t -efx_nic_test_registers( - __in efx_nic_t *enp, - __in efx_register_set_t *rsp, - __in size_t count); - -extern __checkReturn efx_rc_t -efx_nic_test_tables( - __in efx_nic_t *enp, - __in efx_register_set_t *rsp, - __in efx_pattern_type_t pattern, - __in size_t count); - -#endif /* EFSYS_OPT_DIAG */ #if EFSYS_OPT_MCDI extern __checkReturn efx_rc_t efx_mcdi_set_workaround( __in efx_nic_t *enp, __in uint32_t type, __in boolean_t enabled, __out_opt uint32_t *flagsp); extern __checkReturn efx_rc_t efx_mcdi_get_workarounds( __in efx_nic_t *enp, __out_opt uint32_t *implementedp, __out_opt uint32_t *enabledp); #endif /* EFSYS_OPT_MCDI */ #if EFSYS_OPT_MAC_STATS /* * Closed range of stats (i.e. 
the first and the last are included). * The last must be greater or equal (if the range is one item only) to * the first. */ struct efx_mac_stats_range { efx_mac_stat_t first; efx_mac_stat_t last; }; extern efx_rc_t efx_mac_stats_mask_add_ranges( __inout_bcount(mask_size) uint32_t *maskp, __in size_t mask_size, __in_ecount(rng_count) const struct efx_mac_stats_range *rngp, __in unsigned int rng_count); #endif /* EFSYS_OPT_MAC_STATS */ #ifdef __cplusplus } #endif #endif /* _SYS_EFX_IMPL_H */ Index: projects/clang700-import/sys/dev/sfxge/common/efx_lic.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_lic.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_lic.c (revision 340918) @@ -1,1754 +1,1718 @@ /*- * Copyright (c) 2009-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_LICENSING #include "ef10_tlv_layout.h" #if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON __checkReturn efx_rc_t efx_lic_v1v2_find_start( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, - __out uint32_t *startp - ); + __out uint32_t *startp); __checkReturn efx_rc_t efx_lic_v1v2_find_end( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, - __out uint32_t *endp - ); + __out uint32_t *endp); __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v1v2_find_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, - __out uint32_t *lengthp - ); + __out uint32_t *lengthp); __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v1v2_validate_key( __in efx_nic_t *enp, __in_bcount(length) caddr_t keyp, - __in uint32_t length - ); + __in uint32_t length); __checkReturn efx_rc_t efx_lic_v1v2_read_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(key_max_size, *lengthp) caddr_t keyp, __in size_t key_max_size, - __out uint32_t *lengthp - ); + __out uint32_t *lengthp); __checkReturn efx_rc_t efx_lic_v1v2_write_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, - __out uint32_t *lengthp - ); + __out uint32_t *lengthp); __checkReturn efx_rc_t efx_lic_v1v2_delete_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end, - __out uint32_t *deltap - ); + __out uint32_t *deltap); __checkReturn efx_rc_t efx_lic_v1v2_create_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, - __in size_t buffer_size - ); + __in size_t buffer_size); __checkReturn efx_rc_t efx_lic_v1v2_finish_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, - __in size_t buffer_size - ); + __in size_t buffer_size); #endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t efx_mcdi_fc_license_update_license( __in efx_nic_t *enp); static __checkReturn efx_rc_t efx_mcdi_fc_license_get_key_stats( __in efx_nic_t *enp, __out efx_key_stats_t *eksp); static const efx_lic_ops_t __efx_lic_v1_ops = { efx_mcdi_fc_license_update_license, /* elo_update_licenses */ efx_mcdi_fc_license_get_key_stats, /* elo_get_key_stats */ NULL, /* elo_app_state */ NULL, /* elo_get_id */ efx_lic_v1v2_find_start, /* elo_find_start */ efx_lic_v1v2_find_end, /* elo_find_end */ efx_lic_v1v2_find_key, /* elo_find_key */ efx_lic_v1v2_validate_key, /* elo_validate_key */ efx_lic_v1v2_read_key, /* elo_read_key */ efx_lic_v1v2_write_key, /* elo_write_key */ efx_lic_v1v2_delete_key, /* elo_delete_key */ efx_lic_v1v2_create_partition, /* elo_create_partition */ efx_lic_v1v2_finish_partition, /* elo_finish_partition */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static __checkReturn efx_rc_t efx_mcdi_licensing_update_licenses( __in efx_nic_t *enp); static __checkReturn efx_rc_t efx_mcdi_licensing_get_key_stats( __in efx_nic_t *enp, __out efx_key_stats_t *eksp); static __checkReturn efx_rc_t efx_mcdi_licensed_app_state( __in efx_nic_t *enp, __in uint64_t app_id, __out boolean_t *licensedp); static const 
efx_lic_ops_t __efx_lic_v2_ops = { efx_mcdi_licensing_update_licenses, /* elo_update_licenses */ efx_mcdi_licensing_get_key_stats, /* elo_get_key_stats */ efx_mcdi_licensed_app_state, /* elo_app_state */ NULL, /* elo_get_id */ efx_lic_v1v2_find_start, /* elo_find_start */ efx_lic_v1v2_find_end, /* elo_find_end */ efx_lic_v1v2_find_key, /* elo_find_key */ efx_lic_v1v2_validate_key, /* elo_validate_key */ efx_lic_v1v2_read_key, /* elo_read_key */ efx_lic_v1v2_write_key, /* elo_write_key */ efx_lic_v1v2_delete_key, /* elo_delete_key */ efx_lic_v1v2_create_partition, /* elo_create_partition */ efx_lic_v1v2_finish_partition, /* elo_finish_partition */ }; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD static __checkReturn efx_rc_t efx_mcdi_licensing_v3_update_licenses( __in efx_nic_t *enp); static __checkReturn efx_rc_t efx_mcdi_licensing_v3_report_license( __in efx_nic_t *enp, __out efx_key_stats_t *eksp); static __checkReturn efx_rc_t efx_mcdi_licensing_v3_app_state( __in efx_nic_t *enp, __in uint64_t app_id, __out boolean_t *licensedp); static __checkReturn efx_rc_t efx_mcdi_licensing_v3_get_id( __in efx_nic_t *enp, __in size_t buffer_size, __out uint32_t *typep, __out size_t *lengthp, __out_bcount_part_opt(buffer_size, *lengthp) uint8_t *bufferp); __checkReturn efx_rc_t efx_lic_v3_find_start( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, - __out uint32_t *startp - ); + __out uint32_t *startp); __checkReturn efx_rc_t efx_lic_v3_find_end( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, - __out uint32_t *endp - ); + __out uint32_t *endp); __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v3_find_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, - __out uint32_t *lengthp - ); + __out uint32_t *lengthp); __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v3_validate_key( __in efx_nic_t *enp, __in_bcount(length) caddr_t keyp, - __in uint32_t length - ); + __in uint32_t length); __checkReturn efx_rc_t efx_lic_v3_read_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(key_max_size, *lengthp) caddr_t keyp, __in size_t key_max_size, - __out uint32_t *lengthp - ); + __out uint32_t *lengthp); __checkReturn efx_rc_t efx_lic_v3_write_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, - __out uint32_t *lengthp - ); + __out uint32_t *lengthp); __checkReturn efx_rc_t efx_lic_v3_delete_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end, - __out uint32_t *deltap - ); + __out uint32_t *deltap); __checkReturn efx_rc_t efx_lic_v3_create_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, - __in size_t buffer_size - ); + __in size_t buffer_size); __checkReturn efx_rc_t efx_lic_v3_finish_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, - __in size_t buffer_size - ); + __in size_t buffer_size); static const efx_lic_ops_t __efx_lic_v3_ops = { efx_mcdi_licensing_v3_update_licenses, /* elo_update_licenses */ efx_mcdi_licensing_v3_report_license, /* elo_get_key_stats */ efx_mcdi_licensing_v3_app_state, /* elo_app_state */ 
efx_mcdi_licensing_v3_get_id, /* elo_get_id */ - efx_lic_v3_find_start, /* elo_find_start*/ + efx_lic_v3_find_start, /* elo_find_start */ efx_lic_v3_find_end, /* elo_find_end */ efx_lic_v3_find_key, /* elo_find_key */ efx_lic_v3_validate_key, /* elo_validate_key */ efx_lic_v3_read_key, /* elo_read_key */ efx_lic_v3_write_key, /* elo_write_key */ efx_lic_v3_delete_key, /* elo_delete_key */ efx_lic_v3_create_partition, /* elo_create_partition */ efx_lic_v3_finish_partition, /* elo_finish_partition */ }; #endif /* EFSYS_OPT_MEDFORD */ /* V1 Licensing - used in Siena Modena only */ #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t efx_mcdi_fc_license_update_license( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MC_CMD_FC_IN_LICENSE_LEN]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FC; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN; req.emr_out_buf = payload; req.emr_out_length = 0; MCDI_IN_SET_DWORD(req, FC_IN_CMD, MC_CMD_FC_OP_LICENSE); MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP, MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used != 0) { rc = EIO; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_fc_license_get_key_stats( __in efx_nic_t *enp, __out efx_key_stats_t *eksp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_FC_IN_LICENSE_LEN, MC_CMD_FC_OUT_LICENSE_LEN)]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_FC; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_FC_OUT_LICENSE_LEN; MCDI_IN_SET_DWORD(req, FC_IN_CMD, MC_CMD_FC_OP_LICENSE); MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP, MC_CMD_FC_IN_LICENSE_GET_KEY_STATS); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_FC_OUT_LICENSE_LEN) { rc = EMSGSIZE; goto fail2; } eksp->eks_valid = MCDI_OUT_DWORD(req, FC_OUT_LICENSE_VALID_KEYS); eksp->eks_invalid = MCDI_OUT_DWORD(req, FC_OUT_LICENSE_INVALID_KEYS); eksp->eks_blacklisted = MCDI_OUT_DWORD(req, FC_OUT_LICENSE_BLACKLISTED_KEYS); eksp->eks_unverifiable = 0; eksp->eks_wrong_node = 0; eksp->eks_licensed_apps_lo = 0; eksp->eks_licensed_apps_hi = 0; eksp->eks_licensed_features_lo = 0; eksp->eks_licensed_features_hi = 0; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_SIENA */ /* V1 and V2 Partition format - based on a 16-bit TLV format */ #if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON /* * V1/V2 format - defined in SF-108542-TC section 4.2: * Type (T): 16bit - revision/HMAC algorithm * Length (L): 16bit - value length in bytes * Value (V): L bytes - payload */ -#define EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX (256) -#define EFX_LICENSE_V1V2_HEADER_LENGTH (2 * sizeof(uint16_t)) +#define EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX (256) +#define EFX_LICENSE_V1V2_HEADER_LENGTH (2 * sizeof (uint16_t)) __checkReturn efx_rc_t efx_lic_v1v2_find_start( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, - __out uint32_t *startp - ) + __out uint32_t *startp) { _NOTE(ARGUNUSED(enp, bufferp, buffer_size)) *startp = 0; return (0); } __checkReturn efx_rc_t efx_lic_v1v2_find_end( __in 
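/*
 * A sketch of one V1/V2 TLV item as described above (the struct is
 * hypothetical and for illustration only; both header fields are
 * little-endian, hence the __LE_TO_CPU_16() conversions below):
 *
 *	typedef struct efx_lic_v1v2_tlv_s {
 *		uint16_t	tlv_type;	- revision/HMAC algorithm
 *		uint16_t	tlv_length;	- payload length in bytes
 *		uint8_t		tlv_value[];	- tlv_length payload bytes
 *	} efx_lic_v1v2_tlv_t;
 *
 * EFX_LICENSE_V1V2_HEADER_LENGTH covers the 4-byte type/length prefix,
 * and an all-zero type/length pair terminates the partition: find_key
 * treats it as end-of-list and create_partition writes such a header.
 */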
efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, - __out uint32_t *endp - ) + __out uint32_t *endp) { _NOTE(ARGUNUSED(enp, bufferp, buffer_size)) *endp = offset + EFX_LICENSE_V1V2_HEADER_LENGTH; return (0); } __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v1v2_find_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, - __out uint32_t *lengthp - ) + __out uint32_t *lengthp) { boolean_t found; uint16_t tlv_type; uint16_t tlv_length; _NOTE(ARGUNUSED(enp)) if ((size_t)buffer_size - offset < EFX_LICENSE_V1V2_HEADER_LENGTH) goto fail1; tlv_type = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[0]); tlv_length = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[1]); if ((tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) || (tlv_type == 0 && tlv_length == 0)) { found = B_FALSE; } else { *startp = offset; *lengthp = tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH; found = B_TRUE; } return (found); fail1: - EFSYS_PROBE(fail1); + EFSYS_PROBE1(fail1, boolean_t, B_FALSE); return (B_FALSE); } __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v1v2_validate_key( __in efx_nic_t *enp, __in_bcount(length) caddr_t keyp, - __in uint32_t length - ) + __in uint32_t length) { uint16_t tlv_type; uint16_t tlv_length; _NOTE(ARGUNUSED(enp)) if (length < EFX_LICENSE_V1V2_HEADER_LENGTH) { goto fail1; } tlv_type = __LE_TO_CPU_16(((uint16_t *)keyp)[0]); tlv_length = __LE_TO_CPU_16(((uint16_t *)keyp)[1]); if (tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) { goto fail2; } if (tlv_type == 0) { goto fail3; } if ((tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH) != length) { goto fail4; } return (B_TRUE); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE(fail1); + EFSYS_PROBE1(fail1, boolean_t, B_FALSE); return (B_FALSE); } __checkReturn efx_rc_t efx_lic_v1v2_read_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(key_max_size, *lengthp) caddr_t keyp, __in size_t key_max_size, - __out uint32_t *lengthp - ) + __out uint32_t *lengthp) { efx_rc_t rc; - _NOTE(ARGUNUSED(enp)) + _NOTE(ARGUNUSED(enp, buffer_size)) EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX + EFX_LICENSE_V1V2_HEADER_LENGTH)); if (key_max_size < length) { rc = ENOSPC; goto fail1; } memcpy(keyp, &bufferp[offset], length); *lengthp = length; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_v1v2_write_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, - __out uint32_t *lengthp - ) + __out uint32_t *lengthp) { efx_rc_t rc; _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX + EFX_LICENSE_V1V2_HEADER_LENGTH)); /* Ensure space for terminator remains */ if ((offset + length) > (buffer_size - EFX_LICENSE_V1V2_HEADER_LENGTH)) { rc = ENOSPC; goto fail1; } memcpy(bufferp + offset, keyp, length); *lengthp = length; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_v1v2_delete_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end, - __out uint32_t *deltap - ) + __out uint32_t *deltap) { uint32_t move_start = 
offset + length; uint32_t move_length = end - move_start; - _NOTE(ARGUNUSED(enp)) + _NOTE(ARGUNUSED(enp, buffer_size)) EFSYS_ASSERT(end <= buffer_size); /* Shift everything after the key down */ memmove(bufferp + offset, bufferp + move_start, move_length); *deltap = length; return (0); } __checkReturn efx_rc_t efx_lic_v1v2_create_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, - __in size_t buffer_size - ) + __in size_t buffer_size) { - _NOTE(ARGUNUSED(enp)) + _NOTE(ARGUNUSED(enp, buffer_size)) EFSYS_ASSERT(EFX_LICENSE_V1V2_HEADER_LENGTH <= buffer_size); /* Write terminator */ memset(bufferp, '\0', EFX_LICENSE_V1V2_HEADER_LENGTH); return (0); } __checkReturn efx_rc_t efx_lic_v1v2_finish_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, - __in size_t buffer_size - ) + __in size_t buffer_size) { _NOTE(ARGUNUSED(enp, bufferp, buffer_size)) return (0); } #endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */ /* V2 Licensing - used by Huntington family only. See SF-113611-TC */ #if EFSYS_OPT_HUNTINGTON static __checkReturn efx_rc_t efx_mcdi_licensed_app_state( __in efx_nic_t *enp, __in uint64_t app_id, __out boolean_t *licensedp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_LICENSED_APP_STATE_IN_LEN, MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)]; uint32_t app_state; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); /* V2 licensing supports 32bit app id only */ if ((app_id >> 32) != 0) { rc = EINVAL; goto fail1; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_LICENSED_APP_STATE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_LICENSED_APP_STATE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN; MCDI_IN_SET_DWORD(req, GET_LICENSED_APP_STATE_IN_APP_ID, app_id & 0xffffffff); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } if (req.emr_out_length_used < MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN) { rc = EMSGSIZE; goto fail3; } app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_APP_STATE_OUT_STATE)); if (app_state != MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED) { *licensedp = B_TRUE; } else { *licensedp = B_FALSE; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_licensing_update_licenses( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MC_CMD_LICENSING_IN_LEN]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LICENSING; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_LICENSING_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = 0; MCDI_IN_SET_DWORD(req, LICENSING_IN_OP, MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used != 0) { rc = EIO; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_licensing_get_key_stats( __in efx_nic_t *enp, __out efx_key_stats_t *eksp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_LICENSING_IN_LEN, MC_CMD_LICENSING_OUT_LEN)]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LICENSING; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_LICENSING_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = 
MC_CMD_LICENSING_OUT_LEN; MCDI_IN_SET_DWORD(req, LICENSING_IN_OP, MC_CMD_LICENSING_IN_OP_GET_KEY_STATS); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_LICENSING_OUT_LEN) { rc = EMSGSIZE; goto fail2; } eksp->eks_valid = MCDI_OUT_DWORD(req, LICENSING_OUT_VALID_APP_KEYS); eksp->eks_invalid = MCDI_OUT_DWORD(req, LICENSING_OUT_INVALID_APP_KEYS); eksp->eks_blacklisted = MCDI_OUT_DWORD(req, LICENSING_OUT_BLACKLISTED_APP_KEYS); eksp->eks_unverifiable = MCDI_OUT_DWORD(req, LICENSING_OUT_UNVERIFIABLE_APP_KEYS); eksp->eks_wrong_node = MCDI_OUT_DWORD(req, LICENSING_OUT_WRONG_NODE_APP_KEYS); eksp->eks_licensed_apps_lo = 0; eksp->eks_licensed_apps_hi = 0; eksp->eks_licensed_features_lo = 0; eksp->eks_licensed_features_hi = 0; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_HUNTINGTON */ /* V3 Licensing - used starting from Medford family. See SF-114884-SW */ #if EFSYS_OPT_MEDFORD static __checkReturn efx_rc_t efx_mcdi_licensing_v3_update_licenses( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LICENSING_V3; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN; req.emr_out_buf = NULL; req.emr_out_length = 0; MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP, MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_licensing_v3_report_license( __in efx_nic_t *enp, __out efx_key_stats_t *eksp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_LICENSING_V3_IN_LEN, MC_CMD_LICENSING_V3_OUT_LEN)]; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LICENSING_V3; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_LICENSING_V3_OUT_LEN; MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP, MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_LICENSING_V3_OUT_LEN) { rc = EMSGSIZE; goto fail2; } eksp->eks_valid = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_VALID_KEYS); eksp->eks_invalid = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_INVALID_KEYS); eksp->eks_blacklisted = 0; eksp->eks_unverifiable = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_UNVERIFIABLE_KEYS); eksp->eks_wrong_node = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_WRONG_NODE_KEYS); eksp->eks_licensed_apps_lo = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_LO); eksp->eks_licensed_apps_hi = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_HI); eksp->eks_licensed_features_lo = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_LO); eksp->eks_licensed_features_hi = MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_HI); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_licensing_v3_app_state( __in efx_nic_t *enp, __in uint64_t app_id, __out boolean_t *licensedp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN, MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN)]; uint32_t app_state; efx_rc_t rc; 
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN; MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO, app_id & 0xffffffff); MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI, app_id >> 32); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } - if (req.emr_out_length_used < MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN) { + if (req.emr_out_length_used < + MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN) { rc = EMSGSIZE; goto fail2; } app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_V3_APP_STATE_OUT_STATE)); if (app_state != MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED) { *licensedp = B_TRUE; } else { *licensedp = B_FALSE; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_licensing_v3_get_id( __in efx_nic_t *enp, __in size_t buffer_size, __out uint32_t *typep, __out size_t *lengthp, __out_bcount_part_opt(buffer_size, *lengthp) uint8_t *bufferp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_LICENSING_GET_ID_V3_IN_LEN, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)]; efx_rc_t rc; req.emr_cmd = MC_CMD_LICENSING_GET_ID_V3; if (bufferp == NULL) { /* Request id type and length only */ req.emr_in_buf = bufferp; req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN; req.emr_out_buf = bufferp; req.emr_out_length = MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN; (void) memset(payload, 0, sizeof (payload)); } else { /* Request full buffer */ req.emr_in_buf = bufferp; req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN; req.emr_out_buf = bufferp; - req.emr_out_length = MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX); + req.emr_out_length = + MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX); (void) memset(bufferp, 0, req.emr_out_length); } efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN) { rc = EMSGSIZE; goto fail2; } *typep = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_TYPE); - *lengthp = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH); + *lengthp = + MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH); if (bufferp == NULL) { - /* modify length requirements to indicate to caller the extra buffering - ** needed to read the complete output. - */ + /* + * Modify length requirements to indicate to caller the extra + * buffering needed to read the complete output. + */ *lengthp += MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN; } else { /* Shift ID down to start of buffer */ memmove(bufferp, bufferp + MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST, *lengthp); memset(bufferp + (*lengthp), 0, MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST); } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* V3 format uses Huntington TLV format partition. 
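 *
 * As a usage sketch (caller-side variables hypothetical), note that
 * efx_mcdi_licensing_v3_get_id() above is designed to be called twice:
 * first with bufferp == NULL to discover the required buffer size, then
 * again with a buffer of at least that size:
 *
 *	uint32_t type;
 *	size_t len;
 *	if (efx_mcdi_licensing_v3_get_id(enp, 0, &type, &len, NULL) == 0)
 *		rc = efx_mcdi_licensing_v3_get_id(enp, len, &type,
 *		    &len, idbuf);
 *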
See SF-108797-SW */ -#define EFX_LICENSE_V3_KEY_LENGTH_MIN (64) -#define EFX_LICENSE_V3_KEY_LENGTH_MAX (160) +#define EFX_LICENSE_V3_KEY_LENGTH_MIN (64) +#define EFX_LICENSE_V3_KEY_LENGTH_MAX (160) __checkReturn efx_rc_t efx_lic_v3_find_start( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, - __out uint32_t *startp - ) + __out uint32_t *startp) { _NOTE(ARGUNUSED(enp)) - return ef10_nvram_buffer_find_item_start(bufferp, buffer_size, startp); + return (ef10_nvram_buffer_find_item_start(bufferp, buffer_size, + startp)); } __checkReturn efx_rc_t efx_lic_v3_find_end( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, - __out uint32_t *endp - ) + __out uint32_t *endp) { _NOTE(ARGUNUSED(enp)) - return ef10_nvram_buffer_find_end(bufferp, buffer_size, offset, endp); + return (ef10_nvram_buffer_find_end(bufferp, buffer_size, offset, endp)); } __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v3_find_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, - __out uint32_t *lengthp - ) + __out uint32_t *lengthp) { _NOTE(ARGUNUSED(enp)) return ef10_nvram_buffer_find_item(bufferp, buffer_size, offset, startp, lengthp); } __checkReturn __success(return != B_FALSE) boolean_t efx_lic_v3_validate_key( __in efx_nic_t *enp, __in_bcount(length) caddr_t keyp, - __in uint32_t length - ) + __in uint32_t length) { /* Check key is a valid V3 key */ uint8_t key_type; uint8_t key_length; _NOTE(ARGUNUSED(enp)) if (length < EFX_LICENSE_V3_KEY_LENGTH_MIN) { goto fail1; } if (length > EFX_LICENSE_V3_KEY_LENGTH_MAX) { goto fail2; } key_type = ((uint8_t *)keyp)[0]; key_length = ((uint8_t *)keyp)[1]; if (key_type < 3) { goto fail3; } if (key_length > length) { goto fail4; } return (B_TRUE); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: - EFSYS_PROBE(fail1); + EFSYS_PROBE1(fail1, boolean_t, B_FALSE); return (B_FALSE); } __checkReturn efx_rc_t efx_lic_v3_read_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(key_max_size, *lengthp) caddr_t keyp, __in size_t key_max_size, - __out uint32_t *lengthp - ) + __out uint32_t *lengthp) { _NOTE(ARGUNUSED(enp)) return ef10_nvram_buffer_get_item(bufferp, buffer_size, offset, length, keyp, key_max_size, lengthp); } __checkReturn efx_rc_t efx_lic_v3_write_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, - __out uint32_t *lengthp - ) + __out uint32_t *lengthp) { _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT(length <= EFX_LICENSE_V3_KEY_LENGTH_MAX); return ef10_nvram_buffer_insert_item(bufferp, buffer_size, offset, keyp, length, lengthp); } __checkReturn efx_rc_t efx_lic_v3_delete_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end, - __out uint32_t *deltap - ) + __out uint32_t *deltap) { efx_rc_t rc; _NOTE(ARGUNUSED(enp)) if ((rc = ef10_nvram_buffer_delete_item(bufferp, buffer_size, offset, length, end)) != 0) { goto fail1; } *deltap = length; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_v3_create_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, - __in size_t 
buffer_size - ) + __in size_t buffer_size) { efx_rc_t rc; /* Construct empty partition */ if ((rc = ef10_nvram_buffer_create(enp, NVRAM_PARTITION_TYPE_LICENSE, bufferp, buffer_size)) != 0) { rc = EFAULT; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_v3_finish_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, - __in size_t buffer_size - ) + __in size_t buffer_size) { efx_rc_t rc; if ((rc = ef10_nvram_buffer_finish(bufferp, buffer_size)) != 0) { goto fail1; } /* Validate completed partition */ if ((rc = ef10_nvram_buffer_validate(enp, NVRAM_PARTITION_TYPE_LICENSE, bufferp, buffer_size)) != 0) { goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_lic_init( __in efx_nic_t *enp) { const efx_lic_ops_t *elop; efx_key_stats_t eks; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_LIC)); switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: elop = &__efx_lic_v1_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: elop = &__efx_lic_v2_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: elop = &__efx_lic_v3_ops; break; #endif /* EFSYS_OPT_MEDFORD */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail1; } enp->en_elop = elop; enp->en_mod_flags |= EFX_MOD_LIC; /* Probe for support */ if (efx_lic_get_key_stats(enp, &eks) == 0) { enp->en_licensing_supported = B_TRUE; } else { enp->en_licensing_supported = B_FALSE; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } extern __checkReturn boolean_t efx_lic_check_support( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); - return enp->en_licensing_supported; + return (enp->en_licensing_supported); } void efx_lic_fini( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); enp->en_elop = NULL; enp->en_mod_flags &= ~EFX_MOD_LIC; } __checkReturn efx_rc_t efx_lic_update_licenses( __in efx_nic_t *enp) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_update_licenses(enp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_get_key_stats( __in efx_nic_t *enp, __out efx_key_stats_t *eksp) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_get_key_stats(enp, eksp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_app_state( __in efx_nic_t *enp, __in uint64_t app_id, __out boolean_t *licensedp) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if (elop->elo_app_state == NULL) return (ENOTSUP); if ((rc = elop->elo_app_state(enp, app_id, licensedp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, 
efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_get_id( __in efx_nic_t *enp, __in size_t buffer_size, __out uint32_t *typep, __out size_t *lengthp, - __out_opt uint8_t *bufferp - ) + __out_opt uint8_t *bufferp) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if (elop->elo_get_id == NULL) return (ENOTSUP); if ((rc = elop->elo_get_id(enp, buffer_size, typep, lengthp, bufferp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } -/* Buffer management API - abstracts varying TLV format used for License partition */ +/* + * Buffer management API - abstracts varying TLV format used for License + * partition. + */ __checkReturn efx_rc_t efx_lic_find_start( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, - __out uint32_t *startp - ) + __out uint32_t *startp) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_find_start(enp, bufferp, buffer_size, startp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_find_end( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, - __out uint32_t *endp - ) + __out uint32_t *endp) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); - if ((rc = elop->elo_find_end(enp, bufferp, buffer_size, offset, endp)) != 0) + rc = elop->elo_find_end(enp, bufferp, buffer_size, offset, endp); + if (rc != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn __success(return != B_FALSE) boolean_t efx_lic_find_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, - __out uint32_t *lengthp - ) + __out uint32_t *lengthp) { const efx_lic_ops_t *elop = enp->en_elop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); EFSYS_ASSERT(bufferp); EFSYS_ASSERT(startp); EFSYS_ASSERT(lengthp); return (elop->elo_find_key(enp, bufferp, buffer_size, offset, startp, lengthp)); } -/* Validate that the buffer contains a single key in a recognised format. -** An empty or terminator buffer is not accepted as a valid key. -*/ +/* + * Validate that the buffer contains a single key in a recognised format. + * An empty or terminator buffer is not accepted as a valid key. 
+ */ __checkReturn __success(return != B_FALSE) boolean_t efx_lic_validate_key( __in efx_nic_t *enp, __in_bcount(length) caddr_t keyp, - __in uint32_t length - ) + __in uint32_t length) { const efx_lic_ops_t *elop = enp->en_elop; boolean_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_validate_key(enp, keyp, length)) == B_FALSE) goto fail1; return (B_TRUE); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_read_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(key_max_size, *lengthp) caddr_t keyp, __in size_t key_max_size, - __out uint32_t *lengthp - ) + __out uint32_t *lengthp) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_read_key(enp, bufferp, buffer_size, offset, length, keyp, key_max_size, lengthp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_write_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, - __out uint32_t *lengthp - ) + __out uint32_t *lengthp) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_write_key(enp, bufferp, buffer_size, offset, keyp, length, lengthp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_delete_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end, - __out uint32_t *deltap - ) + __out uint32_t *deltap) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_delete_key(enp, bufferp, buffer_size, offset, length, end, deltap)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_create_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, - __in size_t buffer_size - ) + __in size_t buffer_size) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_create_partition(enp, bufferp, buffer_size)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_lic_finish_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, - __in size_t buffer_size - ) + __in size_t buffer_size) { const efx_lic_ops_t *elop = enp->en_elop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC); if ((rc = elop->elo_finish_partition(enp, bufferp, buffer_size)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_LICENSING */ Index: projects/clang700-import/sys/dev/sfxge/common/efx_mac.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_mac.c (revision 340917) +++ 
projects/clang700-import/sys/dev/sfxge/common/efx_mac.c (revision 340918) @@ -1,956 +1,947 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t siena_mac_multicast_list_set( __in efx_nic_t *enp); #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA static const efx_mac_ops_t __efx_mac_siena_ops = { siena_mac_poll, /* emo_poll */ siena_mac_up, /* emo_up */ siena_mac_reconfigure, /* emo_addr_set */ siena_mac_reconfigure, /* emo_pdu_set */ siena_mac_pdu_get, /* emo_pdu_get */ siena_mac_reconfigure, /* emo_reconfigure */ siena_mac_multicast_list_set, /* emo_multicast_list_set */ NULL, /* emo_filter_set_default_rxq */ NULL, /* emo_filter_default_rxq_clear */ #if EFSYS_OPT_LOOPBACK siena_mac_loopback_set, /* emo_loopback_set */ #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS siena_mac_stats_get_mask, /* emo_stats_get_mask */ efx_mcdi_mac_stats_clear, /* emo_stats_clear */ efx_mcdi_mac_stats_upload, /* emo_stats_upload */ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */ siena_mac_stats_update /* emo_stats_update */ #endif /* EFSYS_OPT_MAC_STATS */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD static const efx_mac_ops_t __efx_mac_ef10_ops = { ef10_mac_poll, /* emo_poll */ ef10_mac_up, /* emo_up */ ef10_mac_addr_set, /* emo_addr_set */ ef10_mac_pdu_set, /* emo_pdu_set */ ef10_mac_pdu_get, /* emo_pdu_get */ ef10_mac_reconfigure, /* emo_reconfigure */ ef10_mac_multicast_list_set, /* emo_multicast_list_set */ ef10_mac_filter_default_rxq_set, /* emo_filter_default_rxq_set */ ef10_mac_filter_default_rxq_clear, /* emo_filter_default_rxq_clear */ #if EFSYS_OPT_LOOPBACK ef10_mac_loopback_set, /* emo_loopback_set */ #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS ef10_mac_stats_get_mask, /* emo_stats_get_mask */ efx_mcdi_mac_stats_clear, /* emo_stats_clear */ efx_mcdi_mac_stats_upload, /* 
emo_stats_upload */ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */ ef10_mac_stats_update /* emo_stats_update */ #endif /* EFSYS_OPT_MAC_STATS */ }; #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_mac_pdu_set( __in efx_nic_t *enp, __in size_t pdu) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; uint32_t old_pdu; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); if (pdu < EFX_MAC_PDU_MIN) { rc = EINVAL; goto fail1; } if (pdu > EFX_MAC_PDU_MAX) { rc = EINVAL; goto fail2; } old_pdu = epp->ep_mac_pdu; epp->ep_mac_pdu = (uint32_t)pdu; if ((rc = emop->emo_pdu_set(enp)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); epp->ep_mac_pdu = old_pdu; fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mac_pdu_get( __in efx_nic_t *enp, __out size_t *pdu) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; efx_rc_t rc; if ((rc = emop->emo_pdu_get(enp, pdu)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mac_addr_set( __in efx_nic_t *enp, __in uint8_t *addr) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; uint8_t old_addr[6]; uint32_t oui; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if (EFX_MAC_ADDR_IS_MULTICAST(addr)) { rc = EINVAL; goto fail1; } oui = addr[0] << 16 | addr[1] << 8 | addr[2]; if (oui == 0x000000) { rc = EINVAL; goto fail2; } EFX_MAC_ADDR_COPY(old_addr, epp->ep_mac_addr); EFX_MAC_ADDR_COPY(epp->ep_mac_addr, addr); if ((rc = emop->emo_addr_set(enp)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); EFX_MAC_ADDR_COPY(epp->ep_mac_addr, old_addr); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mac_filter_set( __in efx_nic_t *enp, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; boolean_t old_all_unicst; boolean_t old_mulcst; boolean_t old_all_mulcst; boolean_t old_brdcst; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); old_all_unicst = epp->ep_all_unicst; old_mulcst = epp->ep_mulcst; old_all_mulcst = epp->ep_all_mulcst; old_brdcst = epp->ep_brdcst; epp->ep_all_unicst = all_unicst; epp->ep_mulcst = mulcst; epp->ep_all_mulcst = all_mulcst; epp->ep_brdcst = brdcst; if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); epp->ep_all_unicst = old_all_unicst; epp->ep_mulcst = old_mulcst; epp->ep_all_mulcst = old_all_mulcst; epp->ep_brdcst = old_brdcst; return (rc); } __checkReturn efx_rc_t efx_mac_drain( __in efx_nic_t *enp, __in boolean_t enabled) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); if (epp->ep_mac_drain == enabled) return (0); epp->ep_mac_drain = enabled; if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp) { efx_port_t *epp = 
&(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if ((rc = emop->emo_up(enp, mac_upp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mac_fcntl_set( __in efx_nic_t *enp, __in unsigned int fcntl, __in boolean_t autoneg) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; const efx_phy_ops_t *epop = epp->ep_epop; unsigned int old_fcntl; boolean_t old_autoneg; unsigned int old_adv_cap; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if ((fcntl & ~(EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE)) != 0) { rc = EINVAL; goto fail1; } /* * Ignore a request to set flow control auto-negotiation * if the PHY doesn't support it. */ if (~epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN)) autoneg = B_FALSE; old_fcntl = epp->ep_fcntl; old_autoneg = epp->ep_fcntl_autoneg; old_adv_cap = epp->ep_adv_cap_mask; epp->ep_fcntl = fcntl; epp->ep_fcntl_autoneg = autoneg; /* * Always encode the flow control settings in the advertised * capabilities even if we are not trying to auto-negotiate * them and reconfigure both the PHY and the MAC. */ if (fcntl & EFX_FCNTL_RESPOND) epp->ep_adv_cap_mask |= (1 << EFX_PHY_CAP_PAUSE | 1 << EFX_PHY_CAP_ASYM); else epp->ep_adv_cap_mask &= ~(1 << EFX_PHY_CAP_PAUSE | 1 << EFX_PHY_CAP_ASYM); if (fcntl & EFX_FCNTL_GENERATE) epp->ep_adv_cap_mask ^= (1 << EFX_PHY_CAP_ASYM); if ((rc = epop->epo_reconfigure(enp)) != 0) goto fail2; if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); epp->ep_fcntl = old_fcntl; epp->ep_fcntl_autoneg = old_autoneg; epp->ep_adv_cap_mask = old_adv_cap; fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_mac_fcntl_get( __in efx_nic_t *enp, __out unsigned int *fcntl_wantedp, __out unsigned int *fcntl_linkp) { efx_port_t *epp = &(enp->en_port); unsigned int wanted = 0; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); /* * Decode the requested flow control settings from the PHY * advertised capabilities. 
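 *
 * As a summary (derived from the encoding in efx_mac_fcntl_set()
 * above, not from a separate specification):
 *	PAUSE=1, ASYM=0: respond and generate (RESPOND | GENERATE)
 *	PAUSE=1, ASYM=1: respond only (RESPOND)
 *	PAUSE=0, ASYM=1: generate only (GENERATE)
 *	PAUSE=0, ASYM=0: no flow control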
*/ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_PAUSE)) wanted = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_ASYM)) wanted ^= EFX_FCNTL_GENERATE; *fcntl_linkp = epp->ep_fcntl; *fcntl_wantedp = wanted; } __checkReturn efx_rc_t efx_mac_multicast_list_set( __in efx_nic_t *enp, __in_ecount(6*count) uint8_t const *addrs, __in int count) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; uint8_t *old_mulcst_addr_list = NULL; uint32_t old_mulcst_addr_count; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if (count > EFX_MAC_MULTICAST_LIST_MAX) { rc = EINVAL; goto fail1; } old_mulcst_addr_count = epp->ep_mulcst_addr_count; if (old_mulcst_addr_count > 0) { /* Allocate memory to store old list (instead of using stack) */ EFSYS_KMEM_ALLOC(enp->en_esip, old_mulcst_addr_count * EFX_MAC_ADDR_LEN, old_mulcst_addr_list); if (old_mulcst_addr_list == NULL) { rc = ENOMEM; goto fail2; } /* Save the old list in case we need to rollback */ memcpy(old_mulcst_addr_list, epp->ep_mulcst_addr_list, old_mulcst_addr_count * EFX_MAC_ADDR_LEN); } /* Store the new list */ memcpy(epp->ep_mulcst_addr_list, addrs, count * EFX_MAC_ADDR_LEN); epp->ep_mulcst_addr_count = count; if ((rc = emop->emo_multicast_list_set(enp)) != 0) goto fail3; if (old_mulcst_addr_count > 0) { EFSYS_KMEM_FREE(enp->en_esip, old_mulcst_addr_count * EFX_MAC_ADDR_LEN, old_mulcst_addr_list); } return (0); fail3: EFSYS_PROBE(fail3); /* Restore original list on failure */ epp->ep_mulcst_addr_count = old_mulcst_addr_count; if (old_mulcst_addr_count > 0) { memcpy(epp->ep_mulcst_addr_list, old_mulcst_addr_list, old_mulcst_addr_count * EFX_MAC_ADDR_LEN); EFSYS_KMEM_FREE(enp->en_esip, old_mulcst_addr_count * EFX_MAC_ADDR_LEN, old_mulcst_addr_list); } fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mac_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if (emop->emo_filter_default_rxq_set != NULL) { rc = emop->emo_filter_default_rxq_set(enp, erp, using_rss); if (rc != 0) goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_mac_filter_default_rxq_clear( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); if (emop->emo_filter_default_rxq_clear != NULL) emop->emo_filter_default_rxq_clear(enp); } #if EFSYS_OPT_MAC_STATS #if EFSYS_OPT_NAMES /* START MKCONFIG GENERATED EfxMacStatNamesBlock c11b91b42f922516 */ static const char * const __efx_mac_stat_name[] = { "rx_octets", "rx_pkts", "rx_unicst_pkts", "rx_multicst_pkts", "rx_brdcst_pkts", "rx_pause_pkts", "rx_le_64_pkts", "rx_65_to_127_pkts", "rx_128_to_255_pkts", "rx_256_to_511_pkts", "rx_512_to_1023_pkts", "rx_1024_to_15xx_pkts", "rx_ge_15xx_pkts", "rx_errors", "rx_fcs_errors", "rx_drop_events", "rx_false_carrier_errors", "rx_symbol_errors", "rx_align_errors", "rx_internal_errors", "rx_jabber_pkts", "rx_lane0_char_err", "rx_lane1_char_err", "rx_lane2_char_err", "rx_lane3_char_err", "rx_lane0_disp_err", "rx_lane1_disp_err", "rx_lane2_disp_err", "rx_lane3_disp_err", "rx_match_fault", "rx_nodesc_drop_cnt", 
"tx_octets", "tx_pkts", "tx_unicst_pkts", "tx_multicst_pkts", "tx_brdcst_pkts", "tx_pause_pkts", "tx_le_64_pkts", "tx_65_to_127_pkts", "tx_128_to_255_pkts", "tx_256_to_511_pkts", "tx_512_to_1023_pkts", "tx_1024_to_15xx_pkts", "tx_ge_15xx_pkts", "tx_errors", "tx_sgl_col_pkts", "tx_mult_col_pkts", "tx_ex_col_pkts", "tx_late_col_pkts", "tx_def_pkts", "tx_ex_def_pkts", "pm_trunc_bb_overflow", "pm_discard_bb_overflow", "pm_trunc_vfifo_full", "pm_discard_vfifo_full", "pm_trunc_qbb", "pm_discard_qbb", "pm_discard_mapping", "rxdp_q_disabled_pkts", "rxdp_di_dropped_pkts", "rxdp_streaming_pkts", "rxdp_hlb_fetch", "rxdp_hlb_wait", "vadapter_rx_unicast_packets", "vadapter_rx_unicast_bytes", "vadapter_rx_multicast_packets", "vadapter_rx_multicast_bytes", "vadapter_rx_broadcast_packets", "vadapter_rx_broadcast_bytes", "vadapter_rx_bad_packets", "vadapter_rx_bad_bytes", "vadapter_rx_overflow", "vadapter_tx_unicast_packets", "vadapter_tx_unicast_bytes", "vadapter_tx_multicast_packets", "vadapter_tx_multicast_bytes", "vadapter_tx_broadcast_packets", "vadapter_tx_broadcast_bytes", "vadapter_tx_bad_packets", "vadapter_tx_bad_bytes", "vadapter_tx_overflow", }; /* END MKCONFIG GENERATED EfxMacStatNamesBlock */ __checkReturn const char * efx_mac_stat_name( __in efx_nic_t *enp, __in unsigned int id) { _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(id, <, EFX_MAC_NSTATS); return (__efx_mac_stat_name[id]); } #endif /* EFSYS_OPT_NAMES */ static efx_rc_t efx_mac_stats_mask_add_range( __inout_bcount(mask_size) uint32_t *maskp, __in size_t mask_size, __in const struct efx_mac_stats_range *rngp) { unsigned int mask_npages = mask_size / sizeof (*maskp); unsigned int el; unsigned int el_min; unsigned int el_max; unsigned int low; unsigned int high; unsigned int width; efx_rc_t rc; if ((mask_npages * EFX_MAC_STATS_MASK_BITS_PER_PAGE) <= (unsigned int)rngp->last) { rc = EINVAL; goto fail1; } EFSYS_ASSERT3U(rngp->first, <=, rngp->last); EFSYS_ASSERT3U(rngp->last, <, EFX_MAC_NSTATS); for (el = 0; el < mask_npages; ++el) { el_min = el * EFX_MAC_STATS_MASK_BITS_PER_PAGE; el_max = el_min + (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1); if ((unsigned int)rngp->first > el_max || (unsigned int)rngp->last < el_min) continue; low = MAX((unsigned int)rngp->first, el_min); high = MIN((unsigned int)rngp->last, el_max); width = high - low + 1; maskp[el] |= (width == EFX_MAC_STATS_MASK_BITS_PER_PAGE) ? 
(~0ULL) : (((1ULL << width) - 1) << (low - el_min)); } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } efx_rc_t efx_mac_stats_mask_add_ranges( __inout_bcount(mask_size) uint32_t *maskp, __in size_t mask_size, __in_ecount(rng_count) const struct efx_mac_stats_range *rngp, __in unsigned int rng_count) { unsigned int i; efx_rc_t rc; for (i = 0; i < rng_count; ++i) { if ((rc = efx_mac_stats_mask_add_range(maskp, mask_size, &rngp[i])) != 0) goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mac_stats_get_mask( __in efx_nic_t *enp, __out_bcount(mask_size) uint32_t *maskp, __in size_t mask_size) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(maskp != NULL); EFSYS_ASSERT(mask_size % sizeof (maskp[0]) == 0); (void) memset(maskp, 0, mask_size); if ((rc = emop->emo_stats_get_mask(enp, maskp, mask_size)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mac_stats_clear( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); if ((rc = emop->emo_stats_clear(enp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mac_stats_upload( __in efx_nic_t *enp, __in efsys_mem_t *esmp) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); - /* - * Don't assert !ep_mac_stats_pending, because the client might - * have failed to finalise statistics when previously stopping - * the port. 
- */ if ((rc = emop->emo_stats_upload(enp, esmp)) != 0) goto fail1; - epp->ep_mac_stats_pending = B_TRUE; - return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mac_stats_periodic( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __in uint16_t period_ms, __in boolean_t events) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); if (emop->emo_stats_periodic == NULL) { rc = EINVAL; goto fail1; } if ((rc = emop->emo_stats_periodic(enp, esmp, period_ms, events)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *essp, __inout_opt uint32_t *generationp) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); rc = emop->emo_stats_update(enp, esmp, essp, generationp); - if (rc == 0) - epp->ep_mac_stats_pending = B_FALSE; return (rc); } #endif /* EFSYS_OPT_MAC_STATS */ __checkReturn efx_rc_t efx_mac_select( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_mac_type_t type = EFX_MAC_INVALID; const efx_mac_ops_t *emop; int rc = EINVAL; switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: emop = &__efx_mac_siena_ops; type = EFX_MAC_SIENA; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: emop = &__efx_mac_ef10_ops; type = EFX_MAC_HUNTINGTON; break; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: emop = &__efx_mac_ef10_ops; type = EFX_MAC_MEDFORD; break; #endif /* EFSYS_OPT_MEDFORD */ default: rc = EINVAL; goto fail1; } EFSYS_ASSERT(type != EFX_MAC_INVALID); EFSYS_ASSERT3U(type, <, EFX_MAC_NTYPES); EFSYS_ASSERT(emop != NULL); epp->ep_emop = emop; epp->ep_mac_type = type; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_SIENA #define EFX_MAC_HASH_BITS (1 << 8) /* Compute the multicast hash as used on Falcon and Siena. 
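 * Each address is hashed with IEEE 802.3 CRC-32; the result modulo
 * EFX_MAC_HASH_BITS (256) selects one bit across the two 128-bit hash
 * words (indices 0-127 set a bit in *hash_low, 128-255 in *hash_high).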
*/ static void siena_mac_multicast_hash_compute( __in_ecount(6*count) uint8_t const *addrs, __in int count, __out efx_oword_t *hash_low, __out efx_oword_t *hash_high) { uint32_t crc, index; int i; EFSYS_ASSERT(hash_low != NULL); EFSYS_ASSERT(hash_high != NULL); EFX_ZERO_OWORD(*hash_low); EFX_ZERO_OWORD(*hash_high); for (i = 0; i < count; i++) { /* Calculate hash bucket (IEEE 802.3 CRC32 of the MAC addr) */ crc = efx_crc32_calculate(0xffffffff, addrs, EFX_MAC_ADDR_LEN); index = crc % EFX_MAC_HASH_BITS; if (index < 128) { EFX_SET_OWORD_BIT(*hash_low, index); } else { EFX_SET_OWORD_BIT(*hash_high, index - 128); } addrs += EFX_MAC_ADDR_LEN; } } static __checkReturn efx_rc_t siena_mac_multicast_list_set( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; efx_oword_t old_hash[2]; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); memcpy(old_hash, epp->ep_multicst_hash, sizeof (old_hash)); siena_mac_multicast_hash_compute( epp->ep_mulcst_addr_list, epp->ep_mulcst_addr_count, &epp->ep_multicst_hash[0], &epp->ep_multicst_hash[1]); if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); memcpy(epp->ep_multicst_hash, old_hash, sizeof (old_hash)); return (rc); } #endif /* EFSYS_OPT_SIENA */ Index: projects/clang700-import/sys/dev/sfxge/common/efx_mcdi.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_mcdi.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_mcdi.c (revision 340918) @@ -1,2351 +1,2353 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_MCDI /* * There are three versions of the MCDI interface: * - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers. 
* - MCDIv1: Siena firmware and Huntington BootROM. * - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM. * Transport uses MCDIv2 headers. * * MCDIv2 Header NOT_EPOCH flag * ---------------------------- * A new epoch begins at initial startup or after an MC reboot, and defines when * the MC should reject stale MCDI requests. * * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1. * * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0. */ #if EFSYS_OPT_SIENA static const efx_mcdi_ops_t __efx_mcdi_siena_ops = { siena_mcdi_init, /* emco_init */ siena_mcdi_send_request, /* emco_send_request */ siena_mcdi_poll_reboot, /* emco_poll_reboot */ siena_mcdi_poll_response, /* emco_poll_response */ siena_mcdi_read_response, /* emco_read_response */ siena_mcdi_fini, /* emco_fini */ siena_mcdi_feature_supported, /* emco_feature_supported */ siena_mcdi_get_timeout, /* emco_get_timeout */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = { ef10_mcdi_init, /* emco_init */ ef10_mcdi_send_request, /* emco_send_request */ ef10_mcdi_poll_reboot, /* emco_poll_reboot */ ef10_mcdi_poll_response, /* emco_poll_response */ ef10_mcdi_read_response, /* emco_read_response */ ef10_mcdi_fini, /* emco_fini */ ef10_mcdi_feature_supported, /* emco_feature_supported */ ef10_mcdi_get_timeout, /* emco_get_timeout */ }; #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_mcdi_init( __in efx_nic_t *enp, __in const efx_mcdi_transport_t *emtp) { const efx_mcdi_ops_t *emcop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0); switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: emcop = &__efx_mcdi_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: emcop = &__efx_mcdi_ef10_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: emcop = &__efx_mcdi_ef10_ops; break; #endif /* EFSYS_OPT_MEDFORD */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail1; } if (enp->en_features & EFX_FEATURE_MCDI_DMA) { /* MCDI requires a DMA buffer in host memory */ if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) { rc = EINVAL; goto fail2; } } enp->en_mcdi.em_emtp = emtp; if (emcop != NULL && emcop->emco_init != NULL) { if ((rc = emcop->emco_init(enp, emtp)) != 0) goto fail3; } enp->en_mcdi.em_emcop = emcop; enp->en_mod_flags |= EFX_MOD_MCDI; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_mcdi.em_emcop = NULL; enp->en_mcdi.em_emtp = NULL; enp->en_mod_flags &= ~EFX_MOD_MCDI; return (rc); } void efx_mcdi_fini( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI); if (emcop != NULL && emcop->emco_fini != NULL) emcop->emco_fini(enp); emip->emi_port = 0; emip->emi_aborted = 0; enp->en_mcdi.em_emcop = NULL; enp->en_mod_flags &= ~EFX_MOD_MCDI; } void efx_mcdi_new_epoch( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efsys_lock_state_t state; /* Start a new epoch (allow fresh MCDI requests to succeed) */ 
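/*
 * After this, the next request built by efx_mcdi_request_start()
 * carries MCDI_HEADER_NOT_EPOCH=0 (see the header construction there),
 * which the MC accepts even after a reboot. Illustrative call
 * sequence, assuming reboot handling in an upper layer:
 *
 *	efx_mcdi_new_epoch(enp);
 *	(re-issue any MCDI request that failed with EIO)
 */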
EFSYS_LOCK(enp->en_eslp, state); emip->emi_new_epoch = B_TRUE; EFSYS_UNLOCK(enp->en_eslp, state); } static void efx_mcdi_send_request( __in efx_nic_t *enp, __in void *hdrp, __in size_t hdr_len, __in void *sdup, __in size_t sdu_len) { const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len); } static efx_rc_t efx_mcdi_poll_reboot( __in efx_nic_t *enp) { const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; efx_rc_t rc; rc = emcop->emco_poll_reboot(enp); return (rc); } static boolean_t efx_mcdi_poll_response( __in efx_nic_t *enp) { const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; boolean_t available; available = emcop->emco_poll_response(enp); return (available); } static void efx_mcdi_read_response( __in efx_nic_t *enp, __out void *bufferp, __in size_t offset, __in size_t length) { const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; emcop->emco_read_response(enp, bufferp, offset, length); } void efx_mcdi_request_start( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __in boolean_t ev_cpl) { #if EFSYS_OPT_MCDI_LOGGING const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; #endif efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_dword_t hdr[2]; size_t hdr_len; unsigned int max_version; unsigned int seq; unsigned int xflags; boolean_t new_epoch; efsys_lock_state_t state; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); /* * efx_mcdi_request_start() is naturally serialised against both * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(), * by virtue of there only being one outstanding MCDI request. * Unfortunately, upper layers may also call efx_mcdi_request_abort() * at any time, to time out a pending MCDI request. That request may * then subsequently complete, meaning efx_mcdi_ev_cpl() or * efx_mcdi_ev_death() may end up running in parallel with * efx_mcdi_request_start(). This race is handled by ensuring that * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the * en_eslp lock. */ EFSYS_LOCK(enp->en_eslp, state); EFSYS_ASSERT(emip->emi_pending_req == NULL); emip->emi_pending_req = emrp; emip->emi_ev_cpl = ev_cpl; emip->emi_poll_cnt = 0; seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ); new_epoch = emip->emi_new_epoch; max_version = emip->emi_max_version; EFSYS_UNLOCK(enp->en_eslp, state); xflags = 0; if (ev_cpl) xflags |= MCDI_HEADER_XFLAGS_EVREQ; /* * Huntington firmware supports MCDIv2, but the Huntington BootROM only * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where * possible to support this. */ if ((max_version >= 2) && ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) || (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1) || (emrp->emr_out_length > MCDI_CTL_SDU_LEN_MAX_V1))) { /* Construct MCDI v2 header */ hdr_len = sizeof (hdr); EFX_POPULATE_DWORD_8(hdr[0], MCDI_HEADER_CODE, MC_CMD_V2_EXTN, MCDI_HEADER_RESYNC, 1, MCDI_HEADER_DATALEN, 0, MCDI_HEADER_SEQ, seq, MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1, MCDI_HEADER_ERROR, 0, MCDI_HEADER_RESPONSE, 0, MCDI_HEADER_XFLAGS, xflags); EFX_POPULATE_DWORD_2(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd, MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length); } else { /* Construct MCDI v1 header */ hdr_len = sizeof (hdr[0]); EFX_POPULATE_DWORD_8(hdr[0], MCDI_HEADER_CODE, emrp->emr_cmd, MCDI_HEADER_RESYNC, 1, MCDI_HEADER_DATALEN, emrp->emr_in_length, MCDI_HEADER_SEQ, seq, MCDI_HEADER_NOT_EPOCH, new_epoch ?
0 : 1, MCDI_HEADER_ERROR, 0, MCDI_HEADER_RESPONSE, 0, MCDI_HEADER_XFLAGS, xflags); } #if EFSYS_OPT_MCDI_LOGGING if (emtp->emt_logger != NULL) { emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST, &hdr, hdr_len, emrp->emr_in_buf, emrp->emr_in_length); } #endif /* EFSYS_OPT_MCDI_LOGGING */ efx_mcdi_send_request(enp, &hdr[0], hdr_len, emrp->emr_in_buf, emrp->emr_in_length); } static void efx_mcdi_read_response_header( __in efx_nic_t *enp, __inout efx_mcdi_req_t *emrp) { #if EFSYS_OPT_MCDI_LOGGING const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; #endif /* EFSYS_OPT_MCDI_LOGGING */ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_dword_t hdr[2]; unsigned int hdr_len; unsigned int data_len; unsigned int seq; unsigned int cmd; unsigned int error; efx_rc_t rc; EFSYS_ASSERT(emrp != NULL); efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0])); hdr_len = sizeof (hdr[0]); cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE); seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ); error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR); if (cmd != MC_CMD_V2_EXTN) { data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN); } else { efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1])); hdr_len += sizeof (hdr[1]); cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD); data_len = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN); } if (error && (data_len == 0)) { /* The MC has rebooted since the request was sent. */ EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US); efx_mcdi_poll_reboot(enp); rc = EIO; goto fail1; } if ((cmd != emrp->emr_cmd) || (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) { /* Response is for a different request */ rc = EIO; goto fail2; } if (error) { efx_dword_t err[2]; unsigned int err_len = MIN(data_len, sizeof (err)); int err_code = MC_CMD_ERR_EPROTO; int err_arg = 0; /* Read error code (and arg num for MCDI v2 commands) */ efx_mcdi_read_response(enp, &err, hdr_len, err_len); if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t))) err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0); #ifdef WITH_MCDI_V2 if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t))) err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0); #endif emrp->emr_err_code = err_code; emrp->emr_err_arg = err_arg; #if EFSYS_OPT_MCDI_PROXY_AUTH if ((err_code == MC_CMD_ERR_PROXY_PENDING) && (err_len == sizeof (err))) { /* * The MCDI request would normally fail with EPERM, but * firmware has forwarded it to an authorization agent * attached to a privileged PF. * * Save the authorization request handle. The client * must wait for a PROXY_RESPONSE event, or timeout. 
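 * (The handle is carried in the MCDI error argument: it is saved in
 * emr_proxy_handle here and retrieved later with
 * efx_mcdi_get_proxy_handle().)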
*/ emrp->emr_proxy_handle = err_arg; } #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ #if EFSYS_OPT_MCDI_LOGGING if (emtp->emt_logger != NULL) { emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_RESPONSE, &hdr, hdr_len, &err, err_len); } #endif /* EFSYS_OPT_MCDI_LOGGING */ if (!emrp->emr_quiet) { EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd, int, err_code, int, err_arg); } rc = efx_mcdi_request_errcode(err_code); goto fail3; } emrp->emr_rc = 0; emrp->emr_out_length_used = data_len; #if EFSYS_OPT_MCDI_PROXY_AUTH emrp->emr_proxy_handle = 0; #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ return; fail3: fail2: fail1: emrp->emr_rc = rc; emrp->emr_out_length_used = 0; } static void efx_mcdi_finish_response( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp) { #if EFSYS_OPT_MCDI_LOGGING const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; #endif /* EFSYS_OPT_MCDI_LOGGING */ efx_dword_t hdr[2]; unsigned int hdr_len; size_t bytes; if (emrp->emr_out_buf == NULL) return; /* Read the command header to detect MCDI response format */ hdr_len = sizeof (hdr[0]); efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len); if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) { /* * Read the actual payload length. The length given in the event * is only correct for responses with the V1 format. */ efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1])); hdr_len += sizeof (hdr[1]); emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN); } /* Copy payload out into caller supplied buffer */ bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length); efx_mcdi_read_response(enp, emrp->emr_out_buf, hdr_len, bytes); #if EFSYS_OPT_MCDI_LOGGING if (emtp->emt_logger != NULL) { emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_RESPONSE, &hdr, hdr_len, emrp->emr_out_buf, bytes); } #endif /* EFSYS_OPT_MCDI_LOGGING */ } __checkReturn boolean_t efx_mcdi_request_poll( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_mcdi_req_t *emrp; efsys_lock_state_t state; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); /* Serialise against post-watchdog efx_mcdi_ev* */ EFSYS_LOCK(enp->en_eslp, state); EFSYS_ASSERT(emip->emi_pending_req != NULL); EFSYS_ASSERT(!emip->emi_ev_cpl); emrp = emip->emi_pending_req; /* Check for reboot atomically w.r.t efx_mcdi_request_start */ if (emip->emi_poll_cnt++ == 0) { if ((rc = efx_mcdi_poll_reboot(enp)) != 0) { emip->emi_pending_req = NULL; EFSYS_UNLOCK(enp->en_eslp, state); /* Reboot/Assertion */ if (rc == EIO || rc == EINTR) efx_mcdi_raise_exception(enp, emrp, rc); goto fail1; } } /* Check if a response is available */ if (efx_mcdi_poll_response(enp) == B_FALSE) { EFSYS_UNLOCK(enp->en_eslp, state); return (B_FALSE); } /* Read the response header */ efx_mcdi_read_response_header(enp, emrp); /* Request complete */ emip->emi_pending_req = NULL; /* Ensure stale MCDI requests fail after an MC reboot. 
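 * Clearing emi_new_epoch means subsequent requests are sent with
 * NOT_EPOCH=1; after an MC reboot those requests fail until
 * efx_mcdi_new_epoch() starts a new epoch.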
*/ emip->emi_new_epoch = B_FALSE; EFSYS_UNLOCK(enp->en_eslp, state); if ((rc = emrp->emr_rc) != 0) goto fail2; efx_mcdi_finish_response(enp, emrp); return (B_TRUE); fail2: if (!emrp->emr_quiet) EFSYS_PROBE(fail2); fail1: if (!emrp->emr_quiet) EFSYS_PROBE1(fail1, efx_rc_t, rc); return (B_TRUE); } __checkReturn boolean_t efx_mcdi_request_abort( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_mcdi_req_t *emrp; boolean_t aborted; efsys_lock_state_t state; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); /* * efx_mcdi_ev_* may have already completed this event, and be * spinning/blocked on the upper layer lock. So it *is* legitimate * for emi_pending_req to be NULL. If there is a pending event * completed request, then provide a "credit" to allow * efx_mcdi_ev_cpl() to accept a single spurious completion. */ EFSYS_LOCK(enp->en_eslp, state); emrp = emip->emi_pending_req; aborted = (emrp != NULL); if (aborted) { emip->emi_pending_req = NULL; /* Error the request */ emrp->emr_out_length_used = 0; emrp->emr_rc = ETIMEDOUT; /* Provide a credit for seqno/emr_pending_req mismatches */ if (emip->emi_ev_cpl) ++emip->emi_aborted; /* * The upper layer has called us, so we don't * need to complete the request. */ } EFSYS_UNLOCK(enp->en_eslp, state); return (aborted); } void efx_mcdi_get_timeout( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __out uint32_t *timeoutp) { const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; emcop->emco_get_timeout(enp, emrp, timeoutp); } __checkReturn efx_rc_t efx_mcdi_request_errcode( __in unsigned int err) { switch (err) { /* MCDI v1 */ case MC_CMD_ERR_EPERM: return (EACCES); case MC_CMD_ERR_ENOENT: return (ENOENT); case MC_CMD_ERR_EINTR: return (EINTR); case MC_CMD_ERR_EACCES: return (EACCES); case MC_CMD_ERR_EBUSY: return (EBUSY); case MC_CMD_ERR_EINVAL: return (EINVAL); case MC_CMD_ERR_EDEADLK: return (EDEADLK); case MC_CMD_ERR_ENOSYS: return (ENOTSUP); case MC_CMD_ERR_ETIME: return (ETIMEDOUT); case MC_CMD_ERR_ENOTSUP: return (ENOTSUP); case MC_CMD_ERR_EALREADY: return (EALREADY); /* MCDI v2 */ case MC_CMD_ERR_EEXIST: return (EEXIST); #ifdef MC_CMD_ERR_EAGAIN case MC_CMD_ERR_EAGAIN: return (EAGAIN); #endif #ifdef MC_CMD_ERR_ENOSPC case MC_CMD_ERR_ENOSPC: return (ENOSPC); #endif case MC_CMD_ERR_ERANGE: return (ERANGE); case MC_CMD_ERR_ALLOC_FAIL: return (ENOMEM); case MC_CMD_ERR_NO_VADAPTOR: return (ENOENT); case MC_CMD_ERR_NO_EVB_PORT: return (ENOENT); case MC_CMD_ERR_NO_VSWITCH: return (ENODEV); case MC_CMD_ERR_VLAN_LIMIT: return (EINVAL); case MC_CMD_ERR_BAD_PCI_FUNC: return (ENODEV); case MC_CMD_ERR_BAD_VLAN_MODE: return (EINVAL); case MC_CMD_ERR_BAD_VSWITCH_TYPE: return (EINVAL); case MC_CMD_ERR_BAD_VPORT_TYPE: return (EINVAL); case MC_CMD_ERR_MAC_EXIST: return (EEXIST); case MC_CMD_ERR_PROXY_PENDING: return (EAGAIN); default: EFSYS_PROBE1(mc_pcol_error, int, err); return (EIO); } } void efx_mcdi_raise_exception( __in efx_nic_t *enp, __in_opt efx_mcdi_req_t *emrp, __in int rc) { const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; efx_mcdi_exception_t exception; /* Reboot or Assertion failure only */ EFSYS_ASSERT(rc == EIO || rc == EINTR); /* * If MC_CMD_REBOOT causes a reboot (dependent on parameters), * then the EIO is not worthy of an exception. */ if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO) return; exception = (rc == EIO) ?
EFX_MCDI_EXCEPTION_MC_REBOOT : EFX_MCDI_EXCEPTION_MC_BADASSERT; emtp->emt_exception(emtp->emt_context, exception); } void efx_mcdi_execute( __in efx_nic_t *enp, __inout efx_mcdi_req_t *emrp) { const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); emrp->emr_quiet = B_FALSE; emtp->emt_execute(emtp->emt_context, emrp); } void efx_mcdi_execute_quiet( __in efx_nic_t *enp, __inout efx_mcdi_req_t *emrp) { const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); emrp->emr_quiet = B_TRUE; emtp->emt_execute(emtp->emt_context, emrp); } void efx_mcdi_ev_cpl( __in efx_nic_t *enp, __in unsigned int seq, __in unsigned int outlen, __in int errcode) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; efx_mcdi_req_t *emrp; efsys_lock_state_t state; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); /* * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start() * when we're completing an aborted request. */ EFSYS_LOCK(enp->en_eslp, state); if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl || (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) { EFSYS_ASSERT(emip->emi_aborted > 0); if (emip->emi_aborted > 0) --emip->emi_aborted; EFSYS_UNLOCK(enp->en_eslp, state); return; } emrp = emip->emi_pending_req; emip->emi_pending_req = NULL; EFSYS_UNLOCK(enp->en_eslp, state); if (emip->emi_max_version >= 2) { /* MCDIv2 response details do not fit into an event. */ efx_mcdi_read_response_header(enp, emrp); } else { if (errcode != 0) { if (!emrp->emr_quiet) { EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd, int, errcode); } emrp->emr_out_length_used = 0; emrp->emr_rc = efx_mcdi_request_errcode(errcode); } else { emrp->emr_out_length_used = outlen; emrp->emr_rc = 0; } } if (emrp->emr_rc == 0) efx_mcdi_finish_response(enp, emrp); emtp->emt_ev_cpl(emtp->emt_context); } #if EFSYS_OPT_MCDI_PROXY_AUTH __checkReturn efx_rc_t efx_mcdi_get_proxy_handle( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __out uint32_t *handlep) { efx_rc_t rc; + _NOTE(ARGUNUSED(enp)) + /* * Return proxy handle from MCDI request that returned with error * MC_MCD_ERR_PROXY_PENDING. This handle is used to wait for a matching * PROXY_RESPONSE event. */ if ((emrp == NULL) || (handlep == NULL)) { rc = EINVAL; goto fail1; } if ((emrp->emr_rc != 0) && (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) { *handlep = emrp->emr_proxy_handle; rc = 0; } else { *handlep = 0; rc = ENOENT; } return (rc); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_mcdi_ev_proxy_response( __in efx_nic_t *enp, __in unsigned int handle, __in unsigned int status) { const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; efx_rc_t rc; /* * Handle results of an authorization request for a privileged MCDI * command. If authorization was granted then we must re-issue the * original MCDI request. If authorization failed or timed out, * then the original MCDI request should be completed with the * result code from this event. */ rc = (status == 0) ? 
0 : efx_mcdi_request_errcode(status); emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc); } #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ void efx_mcdi_ev_death( __in efx_nic_t *enp, __in int rc) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; efx_mcdi_req_t *emrp = NULL; boolean_t ev_cpl; efsys_lock_state_t state; /* * The MCDI request (if there is one) has been terminated, either * by a BADASSERT or REBOOT event. * * If there is an outstanding event-completed MCDI operation, then we * will never receive the completion event (because both MCDI * completions and BADASSERT events are sent to the same evq). So * complete this MCDI op. * * This function might run in parallel with efx_mcdi_request_poll() * for poll completed mcdi requests, and also with * efx_mcdi_request_start() for post-watchdog completions. */ EFSYS_LOCK(enp->en_eslp, state); emrp = emip->emi_pending_req; ev_cpl = emip->emi_ev_cpl; if (emrp != NULL && emip->emi_ev_cpl) { emip->emi_pending_req = NULL; emrp->emr_out_length_used = 0; emrp->emr_rc = rc; ++emip->emi_aborted; } /* * Since we're running in parallel with a request, consume the * status word before dropping the lock. */ if (rc == EIO || rc == EINTR) { EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US); (void) efx_mcdi_poll_reboot(enp); emip->emi_new_epoch = B_TRUE; } EFSYS_UNLOCK(enp->en_eslp, state); efx_mcdi_raise_exception(enp, emrp, rc); if (emrp != NULL && ev_cpl) emtp->emt_ev_cpl(emtp->emt_context); } __checkReturn efx_rc_t efx_mcdi_version( __in efx_nic_t *enp, __out_ecount_opt(4) uint16_t versionp[4], __out_opt uint32_t *buildp, __out_opt efx_mcdi_boot_t *statusp) { efx_mcdi_req_t req; uint8_t payload[MAX(MAX(MC_CMD_GET_VERSION_IN_LEN, MC_CMD_GET_VERSION_OUT_LEN), MAX(MC_CMD_GET_BOOT_STATUS_IN_LEN, MC_CMD_GET_BOOT_STATUS_OUT_LEN))]; efx_word_t *ver_words; uint16_t version[4]; uint32_t build; efx_mcdi_boot_t status; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_VERSION; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } /* bootrom support */ if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) { version[0] = version[1] = version[2] = version[3] = 0; build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE); goto version; } if (req.emr_out_length_used < MC_CMD_GET_VERSION_OUT_LEN) { rc = EMSGSIZE; goto fail2; } ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION); version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0); version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0); version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0); version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0); build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE); version: /* The bootrom doesn't understand BOOT_STATUS */ if (MC_FW_VERSION_IS_BOOTLOADER(build)) { status = EFX_MCDI_BOOT_ROM; goto out; } (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_BOOT_STATUS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN; efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc == EACCES) { /* Unprivileged functions cannot access BOOT_STATUS */ status = EFX_MCDI_BOOT_PRIMARY; version[0] = version[1] = version[2] = version[3] = 0; build = 0; goto out; } 
if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) { rc = EMSGSIZE; goto fail4; } if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS, GET_BOOT_STATUS_OUT_FLAGS_PRIMARY)) status = EFX_MCDI_BOOT_PRIMARY; else status = EFX_MCDI_BOOT_SECONDARY; out: if (versionp != NULL) memcpy(versionp, version, sizeof (version)); if (buildp != NULL) *buildp = build; if (statusp != NULL) *statusp = status; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_get_capabilities( __in efx_nic_t *enp, __out_opt uint32_t *flagsp, __out_opt uint16_t *rx_dpcpu_fw_idp, __out_opt uint16_t *tx_dpcpu_fw_idp, __out_opt uint32_t *flags2p, __out_opt uint32_t *tso2ncp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)]; boolean_t v2_capable; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_CAPABILITIES; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN; efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) { rc = EMSGSIZE; goto fail2; } if (flagsp != NULL) *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1); if (rx_dpcpu_fw_idp != NULL) *rx_dpcpu_fw_idp = MCDI_OUT_WORD(req, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); if (tx_dpcpu_fw_idp != NULL) *tx_dpcpu_fw_idp = MCDI_OUT_WORD(req, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) v2_capable = B_FALSE; else v2_capable = B_TRUE; if (flags2p != NULL) { *flags2p = (v2_capable) ? MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2) : 0; } if (tso2ncp != NULL) { *tso2ncp = (v2_capable) ? MCDI_OUT_WORD(req, GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS) : 0; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_do_reboot( __in efx_nic_t *enp, __in boolean_t after_assertion) { uint8_t payload[MAX(MC_CMD_REBOOT_IN_LEN, MC_CMD_REBOOT_OUT_LEN)]; efx_mcdi_req_t req; efx_rc_t rc; /* * We could require the caller to have caused en_mod_flags=0 to * call this function. This doesn't help the other port though, * who's about to get the MC ripped out from underneath them. * Since they have to cope with the subsequent fallout of MCDI * failures, we should as well. */ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_REBOOT; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_REBOOT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_REBOOT_OUT_LEN; MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS, (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0)); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc == EACCES) { /* Unprivileged functions cannot reboot the MC. */ goto out; } /* A successful reboot request returns EIO. 
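The MC restarts before * a response can be delivered, so the in-flight request is aborted and * completes with EIO; that EIO is therefore treated as success below.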
*/ if (req.emr_rc != 0 && req.emr_rc != EIO) { rc = req.emr_rc; goto fail1; } out: return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_reboot( __in efx_nic_t *enp) { return (efx_mcdi_do_reboot(enp, B_FALSE)); } __checkReturn efx_rc_t efx_mcdi_exit_assertion_handler( __in efx_nic_t *enp) { return (efx_mcdi_do_reboot(enp, B_TRUE)); } __checkReturn efx_rc_t efx_mcdi_read_assertion( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_ASSERTS_IN_LEN, MC_CMD_GET_ASSERTS_OUT_LEN)]; const char *reason; unsigned int flags; unsigned int index; unsigned int ofst; int retry; efx_rc_t rc; /* * Before we attempt to chat to the MC, we should verify that the MC * isn't in its assertion handler, either due to a previous reboot, * or because we're reinitializing due to an eec_exception(). * * Use GET_ASSERTS to read any assertion state that may be present. * Retry this command twice: once because a boot-time assertion failure * might cause the first MCDI request to fail, and once again because * we might race with efx_mcdi_exit_assertion_handler() running on * partner port(s) on the same NIC. */ retry = 2; do { (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_ASSERTS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN; MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1); efx_mcdi_execute_quiet(enp, &req); } while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0); if (req.emr_rc != 0) { if (req.emr_rc == EACCES) { /* Unprivileged functions cannot clear assertions. */ goto out; } rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) { rc = EMSGSIZE; goto fail2; } /* Print out any assertion state recorded */ flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS); if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) return (0); reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) ? "system-level assertion" : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) ? "thread-level assertion" : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) ? "watchdog reset" : (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP) ? "illegal address trap" : "unknown assertion"; EFSYS_PROBE3(mcpu_assertion, const char *, reason, unsigned int, MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS), unsigned int, MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS)); /* Print out the registers (r1 ... r31) */ ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; for (index = 1; index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; index++) { EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int, EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst), EFX_DWORD_0)); ofst += sizeof (efx_dword_t); } EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN); out: return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Internal routines for specific MCDI requests. */ __checkReturn efx_rc_t efx_mcdi_drv_attach( __in efx_nic_t *enp, __in boolean_t attach) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_DRV_ATTACH_IN_LEN, MC_CMD_DRV_ATTACH_EXT_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_DRV_ATTACH; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN; /* * Use DONT_CARE for the datapath firmware type to ensure that the * driver can attach to an unprivileged function.
The datapath firmware * type to use is controlled by the 'sfboot' utility. */ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0); MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1); MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) { rc = EMSGSIZE; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_get_board_cfg( __in efx_nic_t *enp, __out_opt uint32_t *board_typep, __out_opt efx_dword_t *capabilitiesp, __out_ecount_opt(6) uint8_t mac_addrp[6]) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN, MC_CMD_GET_BOARD_CFG_OUT_LENMIN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_BOARD_CFG; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { rc = EMSGSIZE; goto fail2; } if (mac_addrp != NULL) { uint8_t *addrp; if (emip->emi_port == 1) { addrp = MCDI_OUT2(req, uint8_t, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0); } else if (emip->emi_port == 2) { addrp = MCDI_OUT2(req, uint8_t, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1); } else { rc = EINVAL; goto fail3; } EFX_MAC_ADDR_COPY(mac_addrp, addrp); } if (capabilitiesp != NULL) { if (emip->emi_port == 1) { *capabilitiesp = *MCDI_OUT2(req, efx_dword_t, GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); } else if (emip->emi_port == 2) { *capabilitiesp = *MCDI_OUT2(req, efx_dword_t, GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); } else { rc = EINVAL; goto fail4; } } if (board_typep != NULL) { *board_typep = MCDI_OUT_DWORD(req, GET_BOARD_CFG_OUT_BOARD_TYPE); } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_get_resource_limits( __in efx_nic_t *enp, __out_opt uint32_t *nevqp, __out_opt uint32_t *nrxqp, __out_opt uint32_t *ntxqp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_RESOURCE_LIMITS_IN_LEN, MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) { rc = EMSGSIZE; goto fail2; } if (nevqp != NULL) *nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ); if (nrxqp != NULL) *nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ); if (ntxqp != NULL) *ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_get_phy_cfg( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN, MC_CMD_GET_PHY_CFG_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PHY_CFG; req.emr_in_buf = 
payload; req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) { rc = EMSGSIZE; goto fail2; } encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE); #if EFSYS_OPT_NAMES (void) strncpy(encp->enc_phy_name, MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME), MIN(sizeof (encp->enc_phy_name) - 1, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN)); #endif /* EFSYS_OPT_NAMES */ (void) memset(encp->enc_phy_revision, 0, sizeof (encp->enc_phy_revision)); memcpy(encp->enc_phy_revision, MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION), MIN(sizeof (encp->enc_phy_revision) - 1, MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN)); #if EFSYS_OPT_PHY_LED_CONTROL encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) | (1 << EFX_PHY_LED_OFF) | (1 << EFX_PHY_LED_ON)); #endif /* EFSYS_OPT_PHY_LED_CONTROL */ /* Get the media type of the fixed port, if recognised. */ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI); EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4); EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4); EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP); EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS); EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T); EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS); epp->ep_fixed_port_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE); if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES) epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID; epp->ep_phy_cap_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP); #if EFSYS_OPT_PHY_FLAGS encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS); #endif /* EFSYS_OPT_PHY_FLAGS */ encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT); /* Populate internal state */ encp->enc_mcdi_mdio_channel = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL); #if EFSYS_OPT_PHY_STATS encp->enc_mcdi_phy_stat_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK); #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_BIST encp->enc_bist_mask = 0; if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, GET_PHY_CFG_OUT_BIST_CABLE_SHORT)) encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT); if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, GET_PHY_CFG_OUT_BIST_CABLE_LONG)) encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG); if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, GET_PHY_CFG_OUT_BIST)) encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL); #endif /* EFSYS_OPT_BIST */ return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_firmware_update_supported( __in efx_nic_t *enp, __out boolean_t *supportedp) { const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; efx_rc_t rc; if (emcop != NULL) { if ((rc = emcop->emco_feature_supported(enp, EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0) goto fail1; } else { /* Earlier devices always supported updates */ *supportedp = B_TRUE; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_macaddr_change_supported( __in efx_nic_t *enp, __out boolean_t *supportedp) { const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; efx_rc_t rc; if (emcop != NULL) { if ((rc = emcop->emco_feature_supported(enp, EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0) goto fail1; } else { /* Earlier devices always 
supported MAC changes */ *supportedp = B_TRUE; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_link_control_supported( __in efx_nic_t *enp, __out boolean_t *supportedp) { const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; efx_rc_t rc; if (emcop != NULL) { if ((rc = emcop->emco_feature_supported(enp, EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0) goto fail1; } else { /* Earlier devices always supported link control */ *supportedp = B_TRUE; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_mac_spoofing_supported( __in efx_nic_t *enp, __out boolean_t *supportedp) { const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; efx_rc_t rc; if (emcop != NULL) { if ((rc = emcop->emco_feature_supported(enp, EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0) goto fail1; } else { /* Earlier devices always supported MAC spoofing */ *supportedp = B_TRUE; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_BIST #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD /* * Enter bist offline mode. This is a fw mode which puts the NIC into a state * where memory BIST tests can be run and not much else can interfere or happen. * A reboot is required to exit this mode. */ __checkReturn efx_rc_t efx_mcdi_bist_enable_offline( __in efx_nic_t *enp) { efx_mcdi_req_t req; efx_rc_t rc; EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0); EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0); req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST; req.emr_in_buf = NULL; req.emr_in_length = 0; req.emr_out_buf = NULL; req.emr_out_length = 0; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_mcdi_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_START_BIST_IN_LEN, MC_CMD_START_BIST_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_START_BIST; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_START_BIST_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_START_BIST_OUT_LEN; switch (type) { case EFX_BIST_TYPE_PHY_NORMAL: MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST); break; case EFX_BIST_TYPE_PHY_CABLE_SHORT: MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST_CABLE_SHORT); break; case EFX_BIST_TYPE_PHY_CABLE_LONG: MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST_CABLE_LONG); break; case EFX_BIST_TYPE_MC_MEM: MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_MC_MEM_BIST); break; case EFX_BIST_TYPE_SAT_MEM: MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PORT_MEM_BIST); break; case EFX_BIST_TYPE_REG: MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_REG_BIST); break; default: EFSYS_ASSERT(0); } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_BIST */ /* Enable logging of some events (e.g. 
link state changes) */ __checkReturn efx_rc_t efx_mcdi_log_ctrl( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_LOG_CTRL_IN_LEN, MC_CMD_LOG_CTRL_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LOG_CTRL; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN; MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST, MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ); MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_MAC_STATS typedef enum efx_stats_action_e { EFX_STATS_CLEAR, EFX_STATS_UPLOAD, EFX_STATS_ENABLE_NOEVENTS, EFX_STATS_ENABLE_EVENTS, EFX_STATS_DISABLE, } efx_stats_action_t; static __checkReturn efx_rc_t efx_mcdi_mac_stats( __in efx_nic_t *enp, __in_opt efsys_mem_t *esmp, __in efx_stats_action_t action, __in uint16_t period_ms) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN, MC_CMD_MAC_STATS_OUT_DMA_LEN)]; int clear = (action == EFX_STATS_CLEAR); int upload = (action == EFX_STATS_UPLOAD); int enable = (action == EFX_STATS_ENABLE_NOEVENTS); int events = (action == EFX_STATS_ENABLE_EVENTS); int disable = (action == EFX_STATS_DISABLE); efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_MAC_STATS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_MAC_STATS_OUT_DMA_LEN; MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD, MAC_STATS_IN_DMA, upload, MAC_STATS_IN_CLEAR, clear, MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable, MAC_STATS_IN_PERIODIC_ENABLE, enable | events, MAC_STATS_IN_PERIODIC_NOEVENT, !events, MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0); if (esmp != NULL) { int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t); EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <= EFX_MAC_STATS_SIZE); MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO, EFSYS_MEM_ADDR(esmp) & 0xffffffff); MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI, EFSYS_MEM_ADDR(esmp) >> 32); MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes); } else { EFSYS_ASSERT(!upload && !enable && !events); } /* * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats, * as this may fail (and leave periodic DMA enabled) if the * vadapter has already been deleted. */ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID, (disable ? EVB_PORT_ID_NULL : enp->en_vport_id)); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { /* EF10: Expect ENOENT if no DMA queues are initialised */ if ((req.emr_rc != ENOENT) || (enp->en_rx_qcount + enp->en_tx_qcount != 0)) { rc = req.emr_rc; goto fail1; } } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_mac_stats_clear( __in efx_nic_t *enp) { efx_rc_t rc; if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR, 0)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_mac_stats_upload( __in efx_nic_t *enp, __in efsys_mem_t *esmp) { efx_rc_t rc; /* * The MC DMAs aggregate statistics for our convenience, so we can * avoid having to pull the statistics buffer into the cache to * maintain cumulative statistics. 
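An EFX_STATS_UPLOAD action requests a * single DMA of the complete statistics buffer into the memory described * by esmp; periodic collection (efx_mcdi_mac_stats_periodic() below) * instead re-arms that DMA on a firmware timer.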
*/ if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD, 0)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_mac_stats_periodic( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __in uint16_t period_ms, __in boolean_t events) { efx_rc_t rc; /* * The MC DMAs aggregate statistics for our convenience, so we can * avoid having to pull the statistics buffer into the cache to * maintain cumulative statistics. * Huntington uses a fixed 1sec period. * Medford uses a fixed 1sec period before v6.2.1.1033 firmware. */ if (period_ms == 0) rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE, 0); else if (events) rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS, period_ms); else rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS, period_ms); if (rc != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_MAC_STATS */ #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD /* * This function returns the PF and VF numbers of a function. If it is a PF, * the VF number is 0xffff. The VF number is the index of the VF on that * function. So if you have 3 VFs on PF 0, the 3 VFs will return (pf=0,vf=0), * (pf=0,vf=1), (pf=0,vf=2) and the PF will return (pf=0, vf=0xffff). */ __checkReturn efx_rc_t efx_mcdi_get_function_info( __in efx_nic_t *enp, __out uint32_t *pfp, __out_opt uint32_t *vfp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_FUNCTION_INFO_IN_LEN, MC_CMD_GET_FUNCTION_INFO_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_FUNCTION_INFO; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF); if (vfp != NULL) *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_privilege_mask( __in efx_nic_t *enp, __in uint32_t pf, __in uint32_t vf, __out uint32_t *maskp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_PRIVILEGE_MASK_IN_LEN, MC_CMD_PRIVILEGE_MASK_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_PRIVILEGE_MASK; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN; MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION, PRIVILEGE_MASK_IN_FUNCTION_PF, pf, PRIVILEGE_MASK_IN_FUNCTION_VF, vf); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_mcdi_set_workaround( __in efx_nic_t *enp, __in uint32_t type, __in boolean_t enabled, __out_opt uint32_t *flagsp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_WORKAROUND_IN_LEN, MC_CMD_WORKAROUND_EXT_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_WORKAROUND; req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_WORKAROUND_OUT_LEN; MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type); MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ? 1 : 0); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (flagsp != NULL) { if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN) *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS); else *flagsp = 0; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_mcdi_get_workarounds( __in efx_nic_t *enp, __out_opt uint32_t *implementedp, __out_opt uint32_t *enabledp) { efx_mcdi_req_t req; uint8_t payload[MC_CMD_GET_WORKAROUNDS_OUT_LEN]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_WORKAROUNDS; req.emr_in_buf = NULL; req.emr_in_length = 0; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (implementedp != NULL) { *implementedp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED); } if (enabledp != NULL) { *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED); } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * Size of media information page in accordance with SFF-8472 and SFF-8436. * It is used in MCDI interface as well. */ #define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80 static __checkReturn efx_rc_t efx_mcdi_get_phy_media_info( __in efx_nic_t *enp, __in uint32_t mcdi_page, __in uint8_t offset, __in uint8_t len, __out_bcount(len) uint8_t *data) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN( EFX_PHY_MEDIA_INFO_PAGE_SIZE))]; efx_rc_t rc; EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE); MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used != MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) { rc = EMSGSIZE; goto fail2; } if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) != EFX_PHY_MEDIA_INFO_PAGE_SIZE) { rc = EIO; goto fail3; } memcpy(data, MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset, len); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } /* * 2-wire device address of the base information in accordance with SFF-8472 * Diagnostic Monitoring Interface for Optical Transceivers section * 4 Memory Organization. */ #define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE 0xA0 /* * 2-wire device address of the digital diagnostics monitoring interface * in accordance with SFF-8472 Diagnostic Monitoring Interface for Optical * Transceivers section 4 Memory Organization. */ #define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM 0xA2 /* * Hard wired 2-wire device address for QSFP+ in accordance with SFF-8436 * QSFP+ 10 Gbs 4X PLUGGABLE TRANSCEIVER section 7.4 Device Addressing and * Operation. 
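Unlike SFP+, QSFP+ exposes * both the static identification data and the diagnostics through this * single address, using the paged memory map described in SFF-8436.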
*/ #define EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP 0xA0 __checkReturn efx_rc_t efx_mcdi_phy_module_get_info( __in efx_nic_t *enp, __in uint8_t dev_addr, __in uint8_t offset, __in uint8_t len, __out_bcount(len) uint8_t *data) { efx_port_t *epp = &(enp->en_port); efx_rc_t rc; uint32_t mcdi_lower_page; uint32_t mcdi_upper_page; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); /* * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages. * The offset-plus-length interface allows access to page 0 only, * i.e. non-zero upper pages are not accessible. * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6 * QSFP+ Memory Map for details on how information is structured * and accessible. */ switch (epp->ep_fixed_port_type) { case EFX_PHY_MEDIA_SFP_PLUS: /* * In accordance with SFF-8472 Diagnostic Monitoring * Interface for Optical Transceivers section 4 Memory * Organization, two 2-wire addresses are defined. */ switch (dev_addr) { /* Base information */ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE: /* * MCDI page 0 should be used to access lower * page 0 (0x00 - 0x7f) at the device address 0xA0. */ mcdi_lower_page = 0; /* * MCDI page 1 should be used to access upper * page 0 (0x80 - 0xff) at the device address 0xA0. */ mcdi_upper_page = 1; break; /* Diagnostics */ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM: /* * MCDI page 2 should be used to access lower * page 0 (0x00 - 0x7f) at the device address 0xA2. */ mcdi_lower_page = 2; /* * MCDI page 3 should be used to access upper * page 0 (0x80 - 0xff) at the device address 0xA2. */ mcdi_upper_page = 3; break; default: rc = ENOTSUP; goto fail1; } break; case EFX_PHY_MEDIA_QSFP_PLUS: switch (dev_addr) { case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP: /* * MCDI page -1 should be used to access lower page 0 * (0x00 - 0x7f). */ mcdi_lower_page = (uint32_t)-1; /* * MCDI page 0 should be used to access upper page 0 * (0x80 - 0xff). */ mcdi_upper_page = 0; break; default: rc = ENOTSUP; goto fail1; } break; default: rc = ENOTSUP; goto fail1; } if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) { uint8_t read_len = MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset); rc = efx_mcdi_get_phy_media_info(enp, mcdi_lower_page, offset, read_len, data); if (rc != 0) goto fail2; data += read_len; len -= read_len; offset = 0; } else { offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE; } if (len > 0) { EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE); EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE); rc = efx_mcdi_get_phy_media_info(enp, mcdi_upper_page, offset, len, data); if (rc != 0) goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_MCDI */ Index: projects/clang700-import/sys/dev/sfxge/common/efx_mcdi.h =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_mcdi.h (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_mcdi.h (revision 340918) @@ -1,419 +1,419 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2.
Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. * * $FreeBSD$ */ #ifndef _SYS_EFX_MCDI_H #define _SYS_EFX_MCDI_H #include "efx.h" #include "efx_regs_mcdi.h" #ifdef __cplusplus extern "C" { #endif /* * A reboot/assertion causes the MCDI status word to be set after the * command word is set or a REBOOT event is sent. If we notice a reboot * via these mechanisms then wait 10ms for the status word to be set. */ #define EFX_MCDI_STATUS_SLEEP_US 10000 struct efx_mcdi_req_s { boolean_t emr_quiet; /* Inputs: Command #, input buffer and length */ unsigned int emr_cmd; uint8_t *emr_in_buf; size_t emr_in_length; - /* Outputs: retcode, buffer, length, and length used*/ + /* Outputs: retcode, buffer, length, and length used */ efx_rc_t emr_rc; uint8_t *emr_out_buf; size_t emr_out_length; size_t emr_out_length_used; /* Internals: low level transport details */ unsigned int emr_err_code; unsigned int emr_err_arg; #if EFSYS_OPT_MCDI_PROXY_AUTH uint32_t emr_proxy_handle; #endif }; typedef struct efx_mcdi_iface_s { unsigned int emi_port; unsigned int emi_max_version; unsigned int emi_seq; efx_mcdi_req_t *emi_pending_req; boolean_t emi_ev_cpl; boolean_t emi_new_epoch; int emi_aborted; uint32_t emi_poll_cnt; uint32_t emi_mc_reboot_status; } efx_mcdi_iface_t; extern void efx_mcdi_execute( __in efx_nic_t *enp, __inout efx_mcdi_req_t *emrp); extern void efx_mcdi_execute_quiet( __in efx_nic_t *enp, __inout efx_mcdi_req_t *emrp); extern void efx_mcdi_ev_cpl( __in efx_nic_t *enp, __in unsigned int seq, __in unsigned int outlen, __in int errcode); #if EFSYS_OPT_MCDI_PROXY_AUTH extern __checkReturn efx_rc_t efx_mcdi_get_proxy_handle( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __out uint32_t *handlep); extern void efx_mcdi_ev_proxy_response( __in efx_nic_t *enp, __in unsigned int handle, __in unsigned int status); #endif extern void efx_mcdi_ev_death( __in efx_nic_t *enp, __in int rc); extern __checkReturn efx_rc_t efx_mcdi_request_errcode( __in unsigned int err); extern void efx_mcdi_raise_exception( __in efx_nic_t *enp, __in_opt efx_mcdi_req_t *emrp, __in int rc); typedef enum efx_mcdi_boot_e { EFX_MCDI_BOOT_PRIMARY, EFX_MCDI_BOOT_SECONDARY, EFX_MCDI_BOOT_ROM, } efx_mcdi_boot_t; extern __checkReturn efx_rc_t efx_mcdi_version( __in efx_nic_t *enp, __out_ecount_opt(4) uint16_t versionp[4], __out_opt uint32_t *buildp, __out_opt efx_mcdi_boot_t *statusp); extern __checkReturn 
efx_rc_t efx_mcdi_get_capabilities( __in efx_nic_t *enp, __out_opt uint32_t *flagsp, __out_opt uint16_t *rx_dpcpu_fw_idp, __out_opt uint16_t *tx_dpcpu_fw_idp, __out_opt uint32_t *flags2p, __out_opt uint32_t *tso2ncp); extern __checkReturn efx_rc_t efx_mcdi_read_assertion( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_mcdi_exit_assertion_handler( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_mcdi_drv_attach( __in efx_nic_t *enp, __in boolean_t attach); extern __checkReturn efx_rc_t efx_mcdi_get_board_cfg( __in efx_nic_t *enp, __out_opt uint32_t *board_typep, __out_opt efx_dword_t *capabilitiesp, __out_ecount_opt(6) uint8_t mac_addrp[6]); extern __checkReturn efx_rc_t efx_mcdi_get_phy_cfg( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_mcdi_firmware_update_supported( __in efx_nic_t *enp, __out boolean_t *supportedp); extern __checkReturn efx_rc_t efx_mcdi_macaddr_change_supported( __in efx_nic_t *enp, __out boolean_t *supportedp); extern __checkReturn efx_rc_t efx_mcdi_link_control_supported( __in efx_nic_t *enp, __out boolean_t *supportedp); extern __checkReturn efx_rc_t efx_mcdi_mac_spoofing_supported( __in efx_nic_t *enp, __out boolean_t *supportedp); #if EFSYS_OPT_BIST #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD extern __checkReturn efx_rc_t efx_mcdi_bist_enable_offline( __in efx_nic_t *enp); #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ extern __checkReturn efx_rc_t efx_mcdi_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type); #endif /* EFSYS_OPT_BIST */ extern __checkReturn efx_rc_t efx_mcdi_get_resource_limits( __in efx_nic_t *enp, __out_opt uint32_t *nevqp, __out_opt uint32_t *nrxqp, __out_opt uint32_t *ntxqp); extern __checkReturn efx_rc_t efx_mcdi_log_ctrl( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_mcdi_mac_stats_clear( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_mcdi_mac_stats_upload( __in efx_nic_t *enp, __in efsys_mem_t *esmp); extern __checkReturn efx_rc_t efx_mcdi_mac_stats_periodic( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __in uint16_t period_ms, __in boolean_t events); #if EFSYS_OPT_LOOPBACK extern __checkReturn efx_rc_t efx_mcdi_get_loopback_modes( __in efx_nic_t *enp); #endif /* EFSYS_OPT_LOOPBACK */ extern __checkReturn efx_rc_t efx_mcdi_phy_module_get_info( __in efx_nic_t *enp, __in uint8_t dev_addr, __in uint8_t offset, __in uint8_t len, __out_bcount(len) uint8_t *data); #define MCDI_IN(_emr, _type, _ofst) \ ((_type *)((_emr).emr_in_buf + (_ofst))) #define MCDI_IN2(_emr, _type, _ofst) \ MCDI_IN(_emr, _type, MC_CMD_ ## _ofst ## _OFST) #define MCDI_IN_SET_BYTE(_emr, _ofst, _value) \ EFX_POPULATE_BYTE_1(*MCDI_IN2(_emr, efx_byte_t, _ofst), \ EFX_BYTE_0, _value) #define MCDI_IN_SET_WORD(_emr, _ofst, _value) \ EFX_POPULATE_WORD_1(*MCDI_IN2(_emr, efx_word_t, _ofst), \ EFX_WORD_0, _value) #define MCDI_IN_SET_DWORD(_emr, _ofst, _value) \ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ EFX_DWORD_0, _value) #define MCDI_IN_SET_DWORD_FIELD(_emr, _ofst, _field, _value) \ EFX_SET_DWORD_FIELD(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field, _value) #define MCDI_IN_POPULATE_DWORD_1(_emr, _ofst, _field1, _value1) \ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1) #define MCDI_IN_POPULATE_DWORD_2(_emr, _ofst, _field1, _value1, \ _field2, _value2) \ EFX_POPULATE_DWORD_2(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2) #define MCDI_IN_POPULATE_DWORD_3(_emr, _ofst, _field1, _value1, \ _field2, _value2, 
_field3, _value3) \ EFX_POPULATE_DWORD_3(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3) #define MCDI_IN_POPULATE_DWORD_4(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4) \ EFX_POPULATE_DWORD_4(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4) #define MCDI_IN_POPULATE_DWORD_5(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4, \ _field5, _value5) \ EFX_POPULATE_DWORD_5(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4, \ MC_CMD_ ## _field5, _value5) #define MCDI_IN_POPULATE_DWORD_6(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6) \ EFX_POPULATE_DWORD_6(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4, \ MC_CMD_ ## _field5, _value5, \ MC_CMD_ ## _field6, _value6) #define MCDI_IN_POPULATE_DWORD_7(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, _field7, _value7) \ EFX_POPULATE_DWORD_7(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4, \ MC_CMD_ ## _field5, _value5, \ MC_CMD_ ## _field6, _value6, \ MC_CMD_ ## _field7, _value7) #define MCDI_IN_POPULATE_DWORD_8(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, _field7, _value7, \ _field8, _value8) \ EFX_POPULATE_DWORD_8(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4, \ MC_CMD_ ## _field5, _value5, \ MC_CMD_ ## _field6, _value6, \ MC_CMD_ ## _field7, _value7, \ MC_CMD_ ## _field8, _value8) #define MCDI_IN_POPULATE_DWORD_9(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, _field7, _value7, \ _field8, _value8, _field9, _value9) \ EFX_POPULATE_DWORD_9(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4, \ MC_CMD_ ## _field5, _value5, \ MC_CMD_ ## _field6, _value6, \ MC_CMD_ ## _field7, _value7, \ MC_CMD_ ## _field8, _value8, \ MC_CMD_ ## _field9, _value9) #define MCDI_IN_POPULATE_DWORD_10(_emr, _ofst, _field1, _value1, \ _field2, _value2, _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, _field7, _value7, \ _field8, _value8, _field9, _value9, _field10, _value10) \ EFX_POPULATE_DWORD_10(*MCDI_IN2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field1, _value1, \ MC_CMD_ ## _field2, _value2, \ MC_CMD_ ## _field3, _value3, \ MC_CMD_ ## _field4, _value4, \ MC_CMD_ ## _field5, _value5, \ MC_CMD_ ## _field6, _value6, \ MC_CMD_ ## _field7, _value7, \ MC_CMD_ ## _field8, _value8, \ MC_CMD_ ## _field9, _value9, \ MC_CMD_ ## _field10, _value10) #define MCDI_OUT(_emr, _type, _ofst) \ ((_type *)((_emr).emr_out_buf + (_ofst))) #define MCDI_OUT2(_emr, _type, _ofst) \ MCDI_OUT(_emr, _type, MC_CMD_ ## _ofst ## _OFST) #define MCDI_OUT_BYTE(_emr, 
_ofst) \ EFX_BYTE_FIELD(*MCDI_OUT2(_emr, efx_byte_t, _ofst), \ EFX_BYTE_0) #define MCDI_OUT_WORD(_emr, _ofst) \ EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \ EFX_WORD_0) #define MCDI_OUT_DWORD(_emr, _ofst) \ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \ EFX_DWORD_0) #define MCDI_OUT_DWORD_FIELD(_emr, _ofst, _field) \ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \ MC_CMD_ ## _field) #define MCDI_EV_FIELD(_eqp, _field) \ EFX_QWORD_FIELD(*_eqp, MCDI_EVENT_ ## _field) #define MCDI_CMD_DWORD_FIELD(_edp, _field) \ EFX_DWORD_FIELD(*_edp, MC_CMD_ ## _field) #define EFX_MCDI_HAVE_PRIVILEGE(mask, priv) \ (((mask) & (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv)) == \ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv)) typedef enum efx_mcdi_feature_id_e { EFX_MCDI_FEATURE_FW_UPDATE = 0, EFX_MCDI_FEATURE_LINK_CONTROL, EFX_MCDI_FEATURE_MACADDR_CHANGE, EFX_MCDI_FEATURE_MAC_SPOOFING, EFX_MCDI_FEATURE_NIDS } efx_mcdi_feature_id_t; #ifdef __cplusplus } #endif #endif /* _SYS_EFX_MCDI_H */ Index: projects/clang700-import/sys/dev/sfxge/common/efx_mon.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_mon.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_mon.c (revision 340918) @@ -1,260 +1,262 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_MON_MCDI #include "mcdi_mon.h" #endif #if EFSYS_OPT_NAMES static const char * const __efx_mon_name[] = { "", "sfx90x0", "sfx91x0", "sfx92x0" }; const char * efx_mon_name( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID); EFSYS_ASSERT3U(encp->enc_mon_type, <, EFX_MON_NTYPES); return (__efx_mon_name[encp->enc_mon_type]); } #endif /* EFSYS_OPT_NAMES */ #if EFSYS_OPT_MON_MCDI static const efx_mon_ops_t __efx_mon_mcdi_ops = { #if EFSYS_OPT_MON_STATS mcdi_mon_stats_update /* emo_stats_update */ #endif /* EFSYS_OPT_MON_STATS */ }; #endif __checkReturn efx_rc_t efx_mon_init( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mon_t *emp = &(enp->en_mon); const efx_mon_ops_t *emop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); if (enp->en_mod_flags & EFX_MOD_MON) { rc = EINVAL; goto fail1; } enp->en_mod_flags |= EFX_MOD_MON; emp->em_type = encp->enc_mon_type; EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID); switch (emp->em_type) { #if EFSYS_OPT_MON_MCDI case EFX_MON_SFC90X0: case EFX_MON_SFC91X0: case EFX_MON_SFC92X0: emop = &__efx_mon_mcdi_ops; break; #endif default: rc = ENOTSUP; goto fail2; } emp->em_emop = emop; return (0); fail2: EFSYS_PROBE(fail2); emp->em_type = EFX_MON_INVALID; enp->en_mod_flags &= ~EFX_MOD_MON; fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_MON_STATS #if EFSYS_OPT_NAMES -/* START MKCONFIG GENERATED MonitorStatNamesBlock 5daa2a5725ba734b */ +/* START MKCONFIG GENERATED MonitorStatNamesBlock d92af1538001301f */ static const char * const __mon_stat_name[] = { "value_2_5v", "value_vccp1", "value_vcc", "value_5v", "value_12v", "value_vccp2", "value_ext_temp", "value_int_temp", "value_ain1", "value_ain2", "controller_cooling", "ext_cooling", "1v", "1_2v", "1_8v", "3_3v", "1_2va", "vref", "vaoe", "aoe_temperature", "psu_aoe_temperature", "psu_temperature", "fan0", "fan1", "fan2", "fan3", "fan4", "vaoe_in", "iaoe", "iaoe_in", "nic_power", "0_9v", "i0_9v", "i1_2v", "0_9v_adc", "controller_temperature2", "vreg_temperature", "vreg_0_9v_temperature", "vreg_1_2v_temperature", "int_vptat", "controller_internal_adc_temperature", "ext_vptat", "controller_external_adc_temperature", "ambient_temperature", "airflow", "vdd08d_vss08d_csr", "vdd08d_vss08d_csr_extadc", "hotpoint_temperature", "phy_power_switch_port0", "phy_power_switch_port1", "mum_vcc", "0v9_a", "i0v9_a", "0v9_a_temp", "0v9_b", "i0v9_b", "0v9_b_temp", "ccom_avreg_1v2_supply", "ccom_avreg_1v2_supply_ext_adc", "ccom_avreg_1v8_supply", "ccom_avreg_1v8_supply_ext_adc", "controller_master_vptat", "controller_master_internal_temp", "controller_master_vptat_ext_adc", "controller_master_internal_temp_ext_adc", "controller_slave_vptat", "controller_slave_internal_temp", "controller_slave_vptat_ext_adc", "controller_slave_internal_temp_ext_adc", "sodimm_vout", "sodimm_0_temp", "sodimm_1_temp", "phy0_vcc", "phy1_vcc", "controller_tdiode_temp", "board_front_temp", "board_back_temp", + "i1v8", + "i2v5", }; /* END MKCONFIG GENERATED MonitorStatNamesBlock */ extern const char * efx_mon_stat_name( __in efx_nic_t *enp, __in efx_mon_stat_t id) { _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(id, <, EFX_MON_NSTATS); return (__mon_stat_name[id]); } #endif /* EFSYS_OPT_NAMES */ __checkReturn 
efx_rc_t efx_mon_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values) { efx_mon_t *emp = &(enp->en_mon); const efx_mon_ops_t *emop = emp->em_emop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON); return (emop->emo_stats_update(enp, esmp, values)); } #endif /* EFSYS_OPT_MON_STATS */ void efx_mon_fini( __in efx_nic_t *enp) { efx_mon_t *emp = &(enp->en_mon); EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON); emp->em_emop = NULL; emp->em_type = EFX_MON_INVALID; enp->en_mod_flags &= ~EFX_MOD_MON; } Index: projects/clang700-import/sys/dev/sfxge/common/efx_nic.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_nic.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_nic.c (revision 340918) @@ -1,1115 +1,911 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" __checkReturn efx_rc_t efx_family( __in uint16_t venid, __in uint16_t devid, __out efx_family_t *efp) { if (venid == EFX_PCI_VENID_SFC) { switch (devid) { #if EFSYS_OPT_SIENA case EFX_PCI_DEVID_SIENA_F1_UNINIT: /* * Hardware default for PF0 of uninitialised Siena. * manftest must be able to cope with this device id. */ *efp = EFX_FAMILY_SIENA; return (0); case EFX_PCI_DEVID_BETHPAGE: case EFX_PCI_DEVID_SIENA: *efp = EFX_FAMILY_SIENA; return (0); #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT: /* * Hardware default for PF0 of uninitialised Huntington. * manftest must be able to cope with this device id. 
*/ *efp = EFX_FAMILY_HUNTINGTON; return (0); case EFX_PCI_DEVID_FARMINGDALE: case EFX_PCI_DEVID_GREENPORT: *efp = EFX_FAMILY_HUNTINGTON; return (0); case EFX_PCI_DEVID_FARMINGDALE_VF: case EFX_PCI_DEVID_GREENPORT_VF: *efp = EFX_FAMILY_HUNTINGTON; return (0); #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_PCI_DEVID_MEDFORD_PF_UNINIT: /* * Hardware default for PF0 of uninitialised Medford. * manftest must be able to cope with this device id. */ *efp = EFX_FAMILY_MEDFORD; return (0); case EFX_PCI_DEVID_MEDFORD: *efp = EFX_FAMILY_MEDFORD; return (0); case EFX_PCI_DEVID_MEDFORD_VF: *efp = EFX_FAMILY_MEDFORD; return (0); #endif /* EFSYS_OPT_MEDFORD */ case EFX_PCI_DEVID_FALCON: /* Obsolete, not supported */ default: break; } } *efp = EFX_FAMILY_INVALID; return (ENOTSUP); } - -#define EFX_BIU_MAGIC0 0x01234567 -#define EFX_BIU_MAGIC1 0xfedcba98 - - __checkReturn efx_rc_t -efx_nic_biu_test( - __in efx_nic_t *enp) -{ - efx_oword_t oword; - efx_rc_t rc; - - /* - * Write magic values to scratch registers 0 and 1, then - * verify that the values were written correctly. Interleave - * the accesses to ensure that the BIU is not just reading - * back the cached value that was last written. - */ - EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0); - EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); - - EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1); - EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); - - EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); - if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) { - rc = EIO; - goto fail1; - } - - EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); - if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) { - rc = EIO; - goto fail2; - } - - /* - * Perform the same test, with the values swapped. This - * ensures that subsequent tests don't start with the correct - * values already written into the scratch registers. 
- */ - EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1); - EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); - - EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0); - EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); - - EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); - if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) { - rc = EIO; - goto fail3; - } - - EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); - if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) { - rc = EIO; - goto fail4; - } - - return (0); - -fail4: - EFSYS_PROBE(fail4); -fail3: - EFSYS_PROBE(fail3); -fail2: - EFSYS_PROBE(fail2); -fail1: - EFSYS_PROBE1(fail1, efx_rc_t, rc); - - return (rc); -} - #if EFSYS_OPT_SIENA static const efx_nic_ops_t __efx_nic_siena_ops = { siena_nic_probe, /* eno_probe */ NULL, /* eno_board_cfg */ NULL, /* eno_set_drv_limits */ siena_nic_reset, /* eno_reset */ siena_nic_init, /* eno_init */ NULL, /* eno_get_vi_pool */ NULL, /* eno_get_bar_region */ #if EFSYS_OPT_DIAG siena_nic_register_test, /* eno_register_test */ #endif /* EFSYS_OPT_DIAG */ siena_nic_fini, /* eno_fini */ siena_nic_unprobe, /* eno_unprobe */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static const efx_nic_ops_t __efx_nic_hunt_ops = { ef10_nic_probe, /* eno_probe */ hunt_board_cfg, /* eno_board_cfg */ ef10_nic_set_drv_limits, /* eno_set_drv_limits */ ef10_nic_reset, /* eno_reset */ ef10_nic_init, /* eno_init */ ef10_nic_get_vi_pool, /* eno_get_vi_pool */ ef10_nic_get_bar_region, /* eno_get_bar_region */ #if EFSYS_OPT_DIAG ef10_nic_register_test, /* eno_register_test */ #endif /* EFSYS_OPT_DIAG */ ef10_nic_fini, /* eno_fini */ ef10_nic_unprobe, /* eno_unprobe */ }; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD static const efx_nic_ops_t __efx_nic_medford_ops = { ef10_nic_probe, /* eno_probe */ medford_board_cfg, /* eno_board_cfg */ ef10_nic_set_drv_limits, /* eno_set_drv_limits */ ef10_nic_reset, /* eno_reset */ ef10_nic_init, /* eno_init */ ef10_nic_get_vi_pool, /* eno_get_vi_pool */ ef10_nic_get_bar_region, /* eno_get_bar_region */ #if EFSYS_OPT_DIAG ef10_nic_register_test, /* eno_register_test */ #endif /* EFSYS_OPT_DIAG */ ef10_nic_fini, /* eno_fini */ ef10_nic_unprobe, /* eno_unprobe */ }; #endif /* EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_nic_create( __in efx_family_t family, __in efsys_identifier_t *esip, __in efsys_bar_t *esbp, __in efsys_lock_t *eslp, __deref_out efx_nic_t **enpp) { efx_nic_t *enp; efx_rc_t rc; EFSYS_ASSERT3U(family, >, EFX_FAMILY_INVALID); EFSYS_ASSERT3U(family, <, EFX_FAMILY_NTYPES); /* Allocate a NIC object */ EFSYS_KMEM_ALLOC(esip, sizeof (efx_nic_t), enp); if (enp == NULL) { rc = ENOMEM; goto fail1; } enp->en_magic = EFX_NIC_MAGIC; switch (family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: enp->en_enop = &__efx_nic_siena_ops; enp->en_features = EFX_FEATURE_IPV6 | EFX_FEATURE_LFSR_HASH_INSERT | EFX_FEATURE_LINK_EVENTS | EFX_FEATURE_PERIODIC_MAC_STATS | EFX_FEATURE_MCDI | EFX_FEATURE_LOOKAHEAD_SPLIT | EFX_FEATURE_MAC_HEADER_FILTERS | EFX_FEATURE_TX_SRC_FILTERS; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: enp->en_enop = &__efx_nic_hunt_ops; enp->en_features = EFX_FEATURE_IPV6 | EFX_FEATURE_LINK_EVENTS | EFX_FEATURE_PERIODIC_MAC_STATS | EFX_FEATURE_MCDI | EFX_FEATURE_MAC_HEADER_FILTERS | EFX_FEATURE_MCDI_DMA | EFX_FEATURE_PIO_BUFFERS | EFX_FEATURE_FW_ASSISTED_TSO | EFX_FEATURE_FW_ASSISTED_TSO_V2 | EFX_FEATURE_PACKED_STREAM; break; 
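/* * Note that Huntington supports both the v1 and v2 firmware-assisted * TSO schemes; contrast with the Medford case below, which omits v1. */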
#endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: enp->en_enop = &__efx_nic_medford_ops; /* * FW_ASSISTED_TSO omitted as Medford only supports firmware * assisted TSO version 2, not the v1 scheme used on Huntington. */ enp->en_features = EFX_FEATURE_IPV6 | EFX_FEATURE_LINK_EVENTS | EFX_FEATURE_PERIODIC_MAC_STATS | EFX_FEATURE_MCDI | EFX_FEATURE_MAC_HEADER_FILTERS | EFX_FEATURE_MCDI_DMA | EFX_FEATURE_PIO_BUFFERS | EFX_FEATURE_FW_ASSISTED_TSO_V2 | EFX_FEATURE_PACKED_STREAM; break; #endif /* EFSYS_OPT_MEDFORD */ default: rc = ENOTSUP; goto fail2; } enp->en_family = family; enp->en_esip = esip; enp->en_esbp = esbp; enp->en_eslp = eslp; *enpp = enp; return (0); fail2: EFSYS_PROBE(fail2); enp->en_magic = 0; /* Free the NIC object */ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_nic_probe( __in efx_nic_t *enp) { const efx_nic_ops_t *enop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); #if EFSYS_OPT_MCDI EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); #endif /* EFSYS_OPT_MCDI */ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE)); enop = enp->en_enop; if ((rc = enop->eno_probe(enp)) != 0) goto fail1; if ((rc = efx_phy_probe(enp)) != 0) goto fail2; enp->en_mod_flags |= EFX_MOD_PROBE; return (0); fail2: EFSYS_PROBE(fail2); enop->eno_unprobe(enp); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_nic_set_drv_limits( __inout efx_nic_t *enp, __in efx_drv_limits_t *edlp) { const efx_nic_ops_t *enop = enp->en_enop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); if (enop->eno_set_drv_limits != NULL) { if ((rc = enop->eno_set_drv_limits(enp, edlp)) != 0) goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_nic_get_bar_region( __in efx_nic_t *enp, __in efx_nic_region_t region, __out uint32_t *offsetp, __out size_t *sizep) { const efx_nic_ops_t *enop = enp->en_enop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (enop->eno_get_bar_region == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = (enop->eno_get_bar_region)(enp, region, offsetp, sizep)) != 0) { goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_nic_get_vi_pool( __in efx_nic_t *enp, __out uint32_t *evq_countp, __out uint32_t *rxq_countp, __out uint32_t *txq_countp) { const efx_nic_ops_t *enop = enp->en_enop; efx_nic_cfg_t *encp = &enp->en_nic_cfg; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (enop->eno_get_vi_pool != NULL) { uint32_t vi_count = 0; if ((rc = (enop->eno_get_vi_pool)(enp, &vi_count)) != 0) goto fail1; *evq_countp = vi_count; *rxq_countp = vi_count; *txq_countp = vi_count; } else { /* Use NIC limits as default value */ *evq_countp = encp->enc_evq_limit; *rxq_countp = encp->enc_rxq_limit; *txq_countp = encp->enc_txq_limit; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_nic_init( __in efx_nic_t *enp) { const efx_nic_ops_t *enop = enp->en_enop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); if 
(enp->en_mod_flags & EFX_MOD_NIC) { rc = EINVAL; goto fail1; } if ((rc = enop->eno_init(enp)) != 0) goto fail2; enp->en_mod_flags |= EFX_MOD_NIC; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_nic_fini( __in efx_nic_t *enp) { const efx_nic_ops_t *enop = enp->en_enop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE); EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_NIC); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); enop->eno_fini(enp); enp->en_mod_flags &= ~EFX_MOD_NIC; } void efx_nic_unprobe( __in efx_nic_t *enp) { const efx_nic_ops_t *enop = enp->en_enop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); #if EFSYS_OPT_MCDI EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); #endif /* EFSYS_OPT_MCDI */ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); efx_phy_unprobe(enp); enop->eno_unprobe(enp); enp->en_mod_flags &= ~EFX_MOD_PROBE; } void efx_nic_destroy( __in efx_nic_t *enp) { efsys_identifier_t *esip = enp->en_esip; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0); enp->en_family = EFX_FAMILY_INVALID; enp->en_esip = NULL; enp->en_esbp = NULL; enp->en_eslp = NULL; enp->en_enop = NULL; enp->en_magic = 0; /* Free the NIC object */ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp); } __checkReturn efx_rc_t efx_nic_reset( __in efx_nic_t *enp) { const efx_nic_ops_t *enop = enp->en_enop; unsigned int mod_flags; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE); /* * All modules except the MCDI, PROBE, NVRAM, VPD, MON * (which we do not reset here) must have been shut down or never * initialized. * * A rule of thumb here is: if the controller or MC reboots, is *any* * state lost? If it is lost and needs reapplying, then the module * *must* not be initialised during the reset.
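* For example, an illustrative caller (not taken from this file) would tear down the stateful modules first: efx_rx_fini(enp); efx_tx_fini(enp); efx_ev_fini(enp); efx_intr_fini(enp); and only then call efx_nic_reset(enp). MCDI, by contrast, only needs to resynchronise with the rebooted MC, so EFX_MOD_MCDI may remain set and is masked out below.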
*/ mod_flags = enp->en_mod_flags; mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM | EFX_MOD_VPD | EFX_MOD_MON); EFSYS_ASSERT3U(mod_flags, ==, 0); if (mod_flags != 0) { rc = EINVAL; goto fail1; } if ((rc = enop->eno_reset(enp)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } const efx_nic_cfg_t * efx_nic_cfg_get( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); return (&(enp->en_nic_cfg)); } __checkReturn efx_rc_t efx_nic_get_fw_version( __in efx_nic_t *enp, __out efx_nic_fw_info_t *enfip) { uint16_t mc_fw_version[4]; efx_rc_t rc; if (enfip == NULL) { rc = EINVAL; goto fail1; } EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); rc = efx_mcdi_version(enp, mc_fw_version, NULL, NULL); if (rc != 0) goto fail2; rc = efx_mcdi_get_capabilities(enp, NULL, - &enfip->enfi_rx_dpcpu_fw_id, - &enfip->enfi_tx_dpcpu_fw_id, - NULL, NULL); + &enfip->enfi_rx_dpcpu_fw_id, + &enfip->enfi_tx_dpcpu_fw_id, + NULL, NULL); if (rc == 0) { enfip->enfi_dpcpu_fw_ids_valid = B_TRUE; } else if (rc == ENOTSUP) { enfip->enfi_dpcpu_fw_ids_valid = B_FALSE; enfip->enfi_rx_dpcpu_fw_id = 0; enfip->enfi_tx_dpcpu_fw_id = 0; } else { goto fail3; } - memcpy(enfip->enfi_mc_fw_version, mc_fw_version, sizeof(mc_fw_version)); + memcpy(enfip->enfi_mc_fw_version, mc_fw_version, + sizeof (mc_fw_version)); return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_DIAG __checkReturn efx_rc_t efx_nic_register_test( __in efx_nic_t *enp) { const efx_nic_ops_t *enop = enp->en_enop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC)); if ((rc = enop->eno_register_test(enp)) != 0) goto fail1; - - return (0); - -fail1: - EFSYS_PROBE1(fail1, efx_rc_t, rc); - - return (rc); -} - - __checkReturn efx_rc_t -efx_nic_test_registers( - __in efx_nic_t *enp, - __in efx_register_set_t *rsp, - __in size_t count) -{ - unsigned int bit; - efx_oword_t original; - efx_oword_t reg; - efx_oword_t buf; - efx_rc_t rc; - - while (count > 0) { - /* This function is only suitable for registers */ - EFSYS_ASSERT(rsp->rows == 1); - - /* bit sweep on and off */ - EFSYS_BAR_READO(enp->en_esbp, rsp->address, &original, - B_TRUE); - for (bit = 0; bit < 128; bit++) { - /* Is this bit in the mask? 
*/ - if (~(rsp->mask.eo_u32[bit >> 5]) & (1 << bit)) - continue; - - /* Test this bit can be set in isolation */ - reg = original; - EFX_AND_OWORD(reg, rsp->mask); - EFX_SET_OWORD_BIT(reg, bit); - - EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg, - B_TRUE); - EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf, - B_TRUE); - - EFX_AND_OWORD(buf, rsp->mask); - if (memcmp(&reg, &buf, sizeof (reg))) { - rc = EIO; - goto fail1; - } - - /* Test this bit can be cleared in isolation */ - EFX_OR_OWORD(reg, rsp->mask); - EFX_CLEAR_OWORD_BIT(reg, bit); - - EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg, - B_TRUE); - EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf, - B_TRUE); - - EFX_AND_OWORD(buf, rsp->mask); - if (memcmp(&reg, &buf, sizeof (reg))) { - rc = EIO; - goto fail2; - } - } - - /* Restore the old value */ - EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, - B_TRUE); - - --count; - ++rsp; - } - - return (0); - -fail2: - EFSYS_PROBE(fail2); -fail1: - EFSYS_PROBE1(fail1, efx_rc_t, rc); - - /* Restore the old value */ - EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE); - - return (rc); -} - - __checkReturn efx_rc_t -efx_nic_test_tables( - __in efx_nic_t *enp, - __in efx_register_set_t *rsp, - __in efx_pattern_type_t pattern, - __in size_t count) -{ - efx_sram_pattern_fn_t func; - unsigned int index; - unsigned int address; - efx_oword_t reg; - efx_oword_t buf; - efx_rc_t rc; - - EFSYS_ASSERT(pattern < EFX_PATTERN_NTYPES); - func = __efx_sram_pattern_fns[pattern]; - - while (count > 0) { - /* Write */ - address = rsp->address; - for (index = 0; index < rsp->rows; ++index) { - func(2 * index + 0, B_FALSE, &reg.eo_qword[0]); - func(2 * index + 1, B_FALSE, &reg.eo_qword[1]); - EFX_AND_OWORD(reg, rsp->mask); - EFSYS_BAR_WRITEO(enp->en_esbp, address, &reg, B_TRUE); - - address += rsp->step; - } - - /* Read */ - address = rsp->address; - for (index = 0; index < rsp->rows; ++index) { - func(2 * index + 0, B_FALSE, &reg.eo_qword[0]); - func(2 * index + 1, B_FALSE, &reg.eo_qword[1]); - EFX_AND_OWORD(reg, rsp->mask); - EFSYS_BAR_READO(enp->en_esbp, address, &buf, B_TRUE); - if (memcmp(&reg, &buf, sizeof (reg))) { - rc = EIO; - goto fail1; - } - - address += rsp->step; - } - - ++rsp; - --count; - } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ #if EFSYS_OPT_LOOPBACK extern void efx_loopback_mask( __in efx_loopback_kind_t loopback_kind, __out efx_qword_t *maskp) { efx_qword_t mask; EFSYS_ASSERT3U(loopback_kind, <, EFX_LOOPBACK_NKINDS); EFSYS_ASSERT(maskp != NULL); /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY ==
EFX_LOOPBACK_GPHY); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XPORT == EFX_LOOPBACK_XPORT); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII_WS == EFX_LOOPBACK_XGMII_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS == EFX_LOOPBACK_XAUI_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_FAR == EFX_LOOPBACK_XAUI_WS_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_NEAR == EFX_LOOPBACK_XAUI_WS_NEAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_WS == EFX_LOOPBACK_GMII_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS == EFX_LOOPBACK_XFI_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS_FAR == EFX_LOOPBACK_XFI_WS_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS_WS == EFX_LOOPBACK_PHYXS_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT == EFX_LOOPBACK_PMA_INT); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_NEAR == EFX_LOOPBACK_SD_NEAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FAR == EFX_LOOPBACK_SD_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT_WS == EFX_LOOPBACK_PMA_INT_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP2_WS == EFX_LOOPBACK_SD_FEP2_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP1_5_WS == EFX_LOOPBACK_SD_FEP1_5_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP_WS == EFX_LOOPBACK_SD_FEP_WS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FES_WS == EFX_LOOPBACK_SD_FES_WS); /* Build bitmask of possible loopback types */ EFX_ZERO_QWORD(mask); if ((loopback_kind == EFX_LOOPBACK_KIND_OFF) || (loopback_kind == EFX_LOOPBACK_KIND_ALL)) { EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_OFF); } if ((loopback_kind == EFX_LOOPBACK_KIND_MAC) || (loopback_kind == EFX_LOOPBACK_KIND_ALL)) { /* * The "MAC" grouping has historically been used by drivers to * mean loopbacks supported by on-chip hardware. Keep that * meaning here, and include on-chip PHY layer loopbacks. */ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_DATA); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMAC); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGMII); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGXS); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGBR); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI_FAR); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII_FAR); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII_FAR); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI_FAR); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_INT); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_NEAR); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_FAR); } if ((loopback_kind == EFX_LOOPBACK_KIND_PHY) || (loopback_kind == EFX_LOOPBACK_KIND_ALL)) { /* * The "PHY" grouping has historically been used by drivers to * mean loopbacks supported by off-chip hardware. Keep that * meaning here. 
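* (Illustrative usage: efx_mcdi_get_loopback_modes() below calls efx_loopback_mask(EFX_LOOPBACK_KIND_ALL, &mask) and then ANDs the result with the per-link-mode masks reported by the MC, so these KIND groupings filter, rather than extend, what the firmware actually offers.)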
*/ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GPHY); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PHY_XS); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PCS); EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_PMD); } *maskp = mask; } __checkReturn efx_rc_t efx_mcdi_get_loopback_modes( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_LOOPBACK_MODES_IN_LEN, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN)]; efx_qword_t mask; efx_qword_t modes; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_LOOPBACK_MODES; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST + MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN) { rc = EMSGSIZE; goto fail2; } /* * We assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree * in efx_loopback_mask() and in siena_phy.c:siena_phy_get_link(). */ efx_loopback_mask(EFX_LOOPBACK_KIND_ALL, &mask); EFX_AND_QWORD(mask, *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_SUGGESTED)); modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_100M); EFX_AND_QWORD(modes, mask); encp->enc_loopback_types[EFX_LINK_100FDX] = modes; modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_1G); EFX_AND_QWORD(modes, mask); encp->enc_loopback_types[EFX_LINK_1000FDX] = modes; modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_10G); EFX_AND_QWORD(modes, mask); encp->enc_loopback_types[EFX_LINK_10000FDX] = modes; if (req.emr_out_length_used >= MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST + MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN) { /* Response includes 40G loopback modes */ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_40G); EFX_AND_QWORD(modes, mask); encp->enc_loopback_types[EFX_LINK_40000FDX] = modes; } EFX_ZERO_QWORD(modes); EFX_SET_QWORD_BIT(modes, EFX_LOOPBACK_OFF); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100FDX]); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_1000FDX]); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_10000FDX]); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_40000FDX]); encp->enc_loopback_types[EFX_LINK_UNKNOWN] = modes; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_LOOPBACK */ __checkReturn efx_rc_t efx_nic_calculate_pcie_link_bandwidth( __in uint32_t pcie_link_width, __in uint32_t pcie_link_gen, __out uint32_t *bandwidth_mbpsp) { uint32_t lane_bandwidth; uint32_t total_bandwidth; efx_rc_t rc; if ((pcie_link_width == 0) || (pcie_link_width > 16) || !ISP2(pcie_link_width)) { rc = EINVAL; goto fail1; } switch (pcie_link_gen) { case EFX_PCIE_LINK_SPEED_GEN1: /* 2.5 Gb/s raw bandwidth with 8b/10b encoding */ lane_bandwidth = 2000; break; case EFX_PCIE_LINK_SPEED_GEN2: /* 5.0 Gb/s raw bandwidth with 8b/10b encoding */ lane_bandwidth = 4000; break; case EFX_PCIE_LINK_SPEED_GEN3: /* 8.0 Gb/s raw bandwidth with 128b/130b encoding */ lane_bandwidth = 7877; break; default: rc = EINVAL; goto fail2; } total_bandwidth = lane_bandwidth * pcie_link_width; *bandwidth_mbpsp = total_bandwidth; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_nic_check_pcie_link_speed( __in efx_nic_t *enp, __in uint32_t pcie_link_width, __in uint32_t pcie_link_gen, __out 
efx_pcie_link_performance_t *resultp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t bandwidth; efx_pcie_link_performance_t result; efx_rc_t rc; if ((encp->enc_required_pcie_bandwidth_mbps == 0) || (pcie_link_width == 0) || (pcie_link_width == 32) || (pcie_link_gen == 0)) { /* * No usable info on what is required and/or in use. In virtual * machines, sometimes the PCIe link width is reported as 0 or * 32, or the speed as 0. */ result = EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH; goto out; } /* Calculate the available bandwidth in megabits per second */ rc = efx_nic_calculate_pcie_link_bandwidth(pcie_link_width, pcie_link_gen, &bandwidth); if (rc != 0) goto fail1; if (bandwidth < encp->enc_required_pcie_bandwidth_mbps) { result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH; } else if (pcie_link_gen < encp->enc_max_pcie_link_gen) { /* The link provides enough bandwidth but not optimal latency */ result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY; } else { result = EFX_PCIE_LINK_PERFORMANCE_OPTIMAL; } out: *resultp = result; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } Index: projects/clang700-import/sys/dev/sfxge/common/efx_port.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_port.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_port.c (revision 340918) @@ -1,257 +1,256 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" __checkReturn efx_rc_t efx_port_init( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); const efx_phy_ops_t *epop = epp->ep_epop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (enp->en_mod_flags & EFX_MOD_PORT) { rc = EINVAL; goto fail1; } enp->en_mod_flags |= EFX_MOD_PORT; epp->ep_mac_type = EFX_MAC_INVALID; epp->ep_link_mode = EFX_LINK_UNKNOWN; epp->ep_mac_drain = B_TRUE; /* Configure the MAC */ if ((rc = efx_mac_select(enp)) != 0) goto fail1; epp->ep_emop->emo_reconfigure(enp); /* Pick up current phy capabilities */ efx_port_poll(enp, NULL); /* * Turn on the PHY if available, otherwise reset it, and * reconfigure it with the current configuration. */ if (epop->epo_power != NULL) { if ((rc = epop->epo_power(enp, B_TRUE)) != 0) goto fail2; } else { if ((rc = epop->epo_reset(enp)) != 0) goto fail2; } EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_PHY); enp->en_reset_flags &= ~EFX_RESET_PHY; if ((rc = epop->epo_reconfigure(enp)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_mod_flags &= ~EFX_MOD_PORT; return (rc); } __checkReturn efx_rc_t efx_port_poll( __in efx_nic_t *enp, __out_opt efx_link_mode_t *link_modep) { efx_port_t *epp = &(enp->en_port); const efx_mac_ops_t *emop = epp->ep_emop; efx_link_mode_t ignore_link_mode; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); - EFSYS_ASSERT(!epp->ep_mac_stats_pending); if (link_modep == NULL) link_modep = &ignore_link_mode; if ((rc = emop->emo_poll(enp, link_modep)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_LOOPBACK __checkReturn efx_rc_t efx_port_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t loopback_type) { efx_port_t *epp = &(enp->en_port); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); const efx_mac_ops_t *emop = epp->ep_emop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(emop != NULL); EFSYS_ASSERT(link_mode < EFX_LINK_NMODES); if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[link_mode], loopback_type) == 0) { rc = ENOTSUP; goto fail1; } if (epp->ep_loopback_type == loopback_type && epp->ep_loopback_link_mode == link_mode) return (0); if ((rc = emop->emo_loopback_set(enp, link_mode, loopback_type)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #if EFSYS_OPT_NAMES static const char * const __efx_loopback_type_name[] = { "OFF", "DATA", "GMAC", "XGMII", "XGXS", "XAUI", "GMII", "SGMII", "XGBR", "XFI", "XAUI_FAR", "GMII_FAR", "SGMII_FAR", "XFI_FAR", "GPHY", "PHY_XS", "PCS", "PMA_PMD", "XPORT", "XGMII_WS", "XAUI_WS", "XAUI_WS_FAR", "XAUI_WS_NEAR", "GMII_WS", "XFI_WS", "XFI_WS_FAR", "PHYXS_WS", "PMA_INT", "SD_NEAR", "SD_FAR", "PMA_INT_WS", "SD_FEP2_WS", "SD_FEP1_5_WS", "SD_FEP_WS", "SD_FES_WS", }; __checkReturn const char * efx_loopback_type_name( __in efx_nic_t *enp, __in efx_loopback_type_t type) { EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__efx_loopback_type_name) == EFX_LOOPBACK_NTYPES); _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(type, <, EFX_LOOPBACK_NTYPES); return
(__efx_loopback_type_name[type]); } #endif /* EFSYS_OPT_NAMES */ #endif /* EFSYS_OPT_LOOPBACK */ void efx_port_fini( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); const efx_phy_ops_t *epop = epp->ep_epop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT); EFSYS_ASSERT(epp->ep_mac_drain); epp->ep_emop = NULL; epp->ep_mac_type = EFX_MAC_INVALID; epp->ep_mac_drain = B_FALSE; /* Turn off the PHY */ if (epop->epo_power != NULL) (void) epop->epo_power(enp, B_FALSE); enp->en_mod_flags &= ~EFX_MOD_PORT; } Index: projects/clang700-import/sys/dev/sfxge/common/efx_rx.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_rx.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_rx.c (revision 340918) @@ -1,1424 +1,1471 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t siena_rx_init( __in efx_nic_t *enp); static void siena_rx_fini( __in efx_nic_t *enp); #if EFSYS_OPT_RX_SCATTER static __checkReturn efx_rc_t siena_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size); #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE static __checkReturn efx_rc_t siena_rx_scale_mode_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert); static __checkReturn efx_rc_t siena_rx_scale_key_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) uint8_t *key, __in size_t n); static __checkReturn efx_rc_t siena_rx_scale_tbl_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) unsigned int *table, __in size_t n); static __checkReturn uint32_t siena_rx_prefix_hash( __in efx_nic_t *enp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer); #endif /* EFSYS_OPT_RX_SCALE */ static __checkReturn efx_rc_t siena_rx_prefix_pktlen( __in efx_nic_t *enp, __in uint8_t *buffer, __out uint16_t *lengthp); -static void +static void siena_rx_qpost( - __in efx_rxq_t *erp, - __in_ecount(n) efsys_dma_addr_t *addrp, - __in size_t size, - __in unsigned int n, - __in unsigned int completed, - __in unsigned int added); + __in efx_rxq_t *erp, + __in_ecount(ndescs) efsys_dma_addr_t *addrp, + __in size_t size, + __in unsigned int ndescs, + __in unsigned int completed, + __in unsigned int added); static void siena_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp); #if EFSYS_OPT_RX_PACKED_STREAM static void siena_rx_qpush_ps_credits( __in efx_rxq_t *erp); static __checkReturn uint8_t * siena_rx_qps_packet_info( __in efx_rxq_t *erp, __in uint8_t *buffer, __in uint32_t buffer_length, __in uint32_t current_offset, __out uint16_t *lengthp, __out uint32_t *next_offsetp, __out uint32_t *timestamp); #endif static __checkReturn efx_rc_t siena_rx_qflush( __in efx_rxq_t *erp); static void siena_rx_qenable( __in efx_rxq_t *erp); static __checkReturn efx_rc_t siena_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, + __in uint32_t type_data, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, + __in unsigned int flags, __in efx_evq_t *eep, __in efx_rxq_t *erp); static void siena_rx_qdestroy( __in efx_rxq_t *erp); #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA static const efx_rx_ops_t __efx_rx_siena_ops = { siena_rx_init, /* erxo_init */ siena_rx_fini, /* erxo_fini */ #if EFSYS_OPT_RX_SCATTER siena_rx_scatter_enable, /* erxo_scatter_enable */ #endif #if EFSYS_OPT_RX_SCALE NULL, /* erxo_scale_context_alloc */ NULL, /* erxo_scale_context_free */ siena_rx_scale_mode_set, /* erxo_scale_mode_set */ siena_rx_scale_key_set, /* erxo_scale_key_set */ siena_rx_scale_tbl_set, /* erxo_scale_tbl_set */ siena_rx_prefix_hash, /* erxo_prefix_hash */ #endif siena_rx_prefix_pktlen, /* erxo_prefix_pktlen */ siena_rx_qpost, /* erxo_qpost */ siena_rx_qpush, /* erxo_qpush */ #if EFSYS_OPT_RX_PACKED_STREAM siena_rx_qpush_ps_credits, /* erxo_qpush_ps_credits */ siena_rx_qps_packet_info, /* erxo_qps_packet_info */ #endif siena_rx_qflush, /* erxo_qflush */ siena_rx_qenable, /* erxo_qenable */ siena_rx_qcreate, /* erxo_qcreate */ siena_rx_qdestroy, /* erxo_qdestroy */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD static const efx_rx_ops_t 
__efx_rx_ef10_ops = { ef10_rx_init, /* erxo_init */ ef10_rx_fini, /* erxo_fini */ #if EFSYS_OPT_RX_SCATTER ef10_rx_scatter_enable, /* erxo_scatter_enable */ #endif #if EFSYS_OPT_RX_SCALE ef10_rx_scale_context_alloc, /* erxo_scale_context_alloc */ ef10_rx_scale_context_free, /* erxo_scale_context_free */ ef10_rx_scale_mode_set, /* erxo_scale_mode_set */ ef10_rx_scale_key_set, /* erxo_scale_key_set */ ef10_rx_scale_tbl_set, /* erxo_scale_tbl_set */ ef10_rx_prefix_hash, /* erxo_prefix_hash */ #endif ef10_rx_prefix_pktlen, /* erxo_prefix_pktlen */ ef10_rx_qpost, /* erxo_qpost */ ef10_rx_qpush, /* erxo_qpush */ #if EFSYS_OPT_RX_PACKED_STREAM ef10_rx_qpush_ps_credits, /* erxo_qpush_ps_credits */ ef10_rx_qps_packet_info, /* erxo_qps_packet_info */ #endif ef10_rx_qflush, /* erxo_qflush */ ef10_rx_qenable, /* erxo_qenable */ ef10_rx_qcreate, /* erxo_qcreate */ ef10_rx_qdestroy, /* erxo_qdestroy */ }; #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_rx_init( __inout efx_nic_t *enp) { const efx_rx_ops_t *erxop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (!(enp->en_mod_flags & EFX_MOD_EV)) { rc = EINVAL; goto fail1; } if (enp->en_mod_flags & EFX_MOD_RX) { rc = EINVAL; goto fail2; } switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: erxop = &__efx_rx_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: erxop = &__efx_rx_ef10_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: erxop = &__efx_rx_ef10_ops; break; #endif /* EFSYS_OPT_MEDFORD */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail3; } if ((rc = erxop->erxo_init(enp)) != 0) goto fail4; enp->en_erxop = erxop; enp->en_mod_flags |= EFX_MOD_RX; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_erxop = NULL; enp->en_mod_flags &= ~EFX_MOD_RX; return (rc); } void efx_rx_fini( __in efx_nic_t *enp) { const efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); EFSYS_ASSERT3U(enp->en_rx_qcount, ==, 0); erxop->erxo_fini(enp); enp->en_erxop = NULL; enp->en_mod_flags &= ~EFX_MOD_RX; } #if EFSYS_OPT_RX_SCATTER __checkReturn efx_rc_t efx_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size) { const efx_rx_ops_t *erxop = enp->en_erxop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if ((rc = erxop->erxo_scatter_enable(enp, buf_size)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t efx_rx_hash_default_support_get( __in efx_nic_t *enp, __out efx_rx_hash_support_t *supportp) { efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if (supportp == NULL) { rc = EINVAL; goto fail1; } /* * Report the hashing support the client gets by default if it * does not allocate an RSS context itself. 
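* (For example, siena_rx_init() below sets en_hash_support to EFX_RX_HASH_AVAILABLE, since Falcon/Siena hardware can insert an RX hash with or without RSS; an illustrative client would check this before calling efx_pseudo_hdr_hash_get() to parse the hash out of the RX prefix.)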
*/ *supportp = enp->en_hash_support; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_rx_scale_default_support_get( __in efx_nic_t *enp, __out efx_rx_scale_context_type_t *typep) { efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if (typep == NULL) { rc = EINVAL; goto fail1; } /* * Report the RSS support the client gets by default if it * does not allocate an RSS context itself. */ *typep = enp->en_rss_context_type; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t efx_rx_scale_context_alloc( __in efx_nic_t *enp, __in efx_rx_scale_context_type_t type, __in uint32_t num_queues, __out uint32_t *rss_contextp) { const efx_rx_ops_t *erxop = enp->en_erxop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if (erxop->erxo_scale_context_alloc == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = erxop->erxo_scale_context_alloc(enp, type, num_queues, rss_contextp)) != 0) { goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t efx_rx_scale_context_free( __in efx_nic_t *enp, __in uint32_t rss_context) { const efx_rx_ops_t *erxop = enp->en_erxop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if (erxop->erxo_scale_context_free == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = erxop->erxo_scale_context_free(enp, rss_context)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t efx_rx_scale_mode_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert) { const efx_rx_ops_t *erxop = enp->en_erxop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if (erxop->erxo_scale_mode_set != NULL) { if ((rc = erxop->erxo_scale_mode_set(enp, rss_context, alg, type, insert)) != 0) goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t efx_rx_scale_key_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) uint8_t *key, __in size_t n) { const efx_rx_ops_t *erxop = enp->en_erxop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if ((rc = erxop->erxo_scale_key_set(enp, rss_context, key, n)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t efx_rx_scale_tbl_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) unsigned int *table, __in size_t n) { const efx_rx_ops_t *erxop = enp->en_erxop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if ((rc = erxop->erxo_scale_tbl_set(enp, rss_context, table, n)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ - void + void efx_rx_qpost( - __in efx_rxq_t *erp, - __in_ecount(n) efsys_dma_addr_t *addrp, - __in size_t 
size, - __in unsigned int n, - __in unsigned int completed, - __in unsigned int added) + __in efx_rxq_t *erp, + __in_ecount(ndescs) efsys_dma_addr_t *addrp, + __in size_t size, + __in unsigned int ndescs, + __in unsigned int completed, + __in unsigned int added) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); - erxop->erxo_qpost(erp, addrp, size, n, completed, added); + erxop->erxo_qpost(erp, addrp, size, ndescs, completed, added); } #if EFSYS_OPT_RX_PACKED_STREAM void efx_rx_qpush_ps_credits( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); erxop->erxo_qpush_ps_credits(erp); } __checkReturn uint8_t * efx_rx_qps_packet_info( __in efx_rxq_t *erp, __in uint8_t *buffer, __in uint32_t buffer_length, __in uint32_t current_offset, __out uint16_t *lengthp, __out uint32_t *next_offsetp, __out uint32_t *timestamp) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; return (erxop->erxo_qps_packet_info(erp, buffer, buffer_length, current_offset, lengthp, next_offsetp, timestamp)); } #endif /* EFSYS_OPT_RX_PACKED_STREAM */ void efx_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); erxop->erxo_qpush(erp, added, pushedp); } __checkReturn efx_rc_t efx_rx_qflush( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; efx_rc_t rc; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); if ((rc = erxop->erxo_qflush(erp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_rx_qenable( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); erxop->erxo_qenable(erp); } - __checkReturn efx_rc_t -efx_rx_qcreate( +static __checkReturn efx_rc_t +efx_rx_qcreate_internal( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, + __in uint32_t type_data, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, + __in unsigned int flags, __in efx_evq_t *eep, __deref_out efx_rxq_t **erpp) { const efx_rx_ops_t *erxop = enp->en_erxop; efx_rxq_t *erp; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); /* Allocate an RXQ object */ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_rxq_t), erp); if (erp == NULL) { rc = ENOMEM; goto fail1; } erp->er_magic = EFX_RXQ_MAGIC; erp->er_enp = enp; erp->er_index = index; - erp->er_mask = n - 1; + erp->er_mask = ndescs - 1; erp->er_esmp = esmp; - if ((rc = erxop->erxo_qcreate(enp, index, label, type, esmp, n, id, - eep, erp)) != 0) + if ((rc = erxop->erxo_qcreate(enp, index, label, type, type_data, esmp, + ndescs, id, flags, eep, erp)) != 0) goto fail2; enp->en_rx_qcount++; *erpp = erp; return (0); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } + __checkReturn efx_rc_t +efx_rx_qcreate( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in efx_rxq_type_t type, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in uint32_t id, + __in unsigned int flags, + __in efx_evq_t *eep, + __deref_out efx_rxq_t **erpp) +{ + return 
efx_rx_qcreate_internal(enp, index, label, type, 0, esmp, ndescs, + id, flags, eep, erpp); +} + +#if EFSYS_OPT_RX_PACKED_STREAM + + __checkReturn efx_rc_t +efx_rx_qcreate_packed_stream( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in uint32_t ps_buf_size, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in efx_evq_t *eep, + __deref_out efx_rxq_t **erpp) +{ + return efx_rx_qcreate_internal(enp, index, label, + EFX_RXQ_TYPE_PACKED_STREAM, ps_buf_size, esmp, ndescs, + 0 /* id unused on EF10 */, EFX_RXQ_FLAG_NONE, eep, erpp); +} + +#endif + void efx_rx_qdestroy( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); erxop->erxo_qdestroy(erp); } __checkReturn efx_rc_t efx_pseudo_hdr_pkt_length_get( __in efx_rxq_t *erp, __in uint8_t *buffer, __out uint16_t *lengthp) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); return (erxop->erxo_prefix_pktlen(enp, buffer, lengthp)); } #if EFSYS_OPT_RX_SCALE __checkReturn uint32_t efx_pseudo_hdr_hash_get( __in efx_rxq_t *erp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); EFSYS_ASSERT3U(enp->en_hash_support, ==, EFX_RX_HASH_AVAILABLE); return (erxop->erxo_prefix_hash(enp, func, buffer)); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t siena_rx_init( __in efx_nic_t *enp) { efx_oword_t oword; unsigned int index; EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, 0x3000 / 32); EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword); /* Zero the RSS table */ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) { EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL, index, &oword, B_TRUE); } #if EFSYS_OPT_RX_SCALE /* The RSS key and indirection table are writable. 
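* (That is, this function owns a private, writable copy of the key and table, so the default context type is reported as EFX_RX_SCALE_EXCLUSIVE below; a read-only copy shared with other functions would instead be reported as EFX_RX_SCALE_SHARED.)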
*/ enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE; /* Hardware can insert RX hash with/without RSS */ enp->en_hash_support = EFX_RX_HASH_AVAILABLE; #endif /* EFSYS_OPT_RX_SCALE */ return (0); } #if EFSYS_OPT_RX_SCATTER static __checkReturn efx_rc_t siena_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size) { unsigned int nbuf32; efx_oword_t oword; efx_rc_t rc; nbuf32 = buf_size / 32; if ((nbuf32 == 0) || (nbuf32 >= (1 << FRF_BZ_RX_USR_BUF_SIZE_WIDTH)) || ((buf_size % 32) != 0)) { rc = EINVAL; goto fail1; } if (enp->en_rx_qcount > 0) { rc = EBUSY; goto fail2; } /* Set scatter buffer size */ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, nbuf32); EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword); /* Enable scatter for packets not matching a filter */ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1); EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCATTER */ #define EFX_RX_LFSR_HASH(_enp, _insert) \ do { \ efx_oword_t oword; \ \ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \ (_insert) ? 1 : 0); \ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \ \ if ((_enp)->en_family == EFX_FAMILY_SIENA) { \ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \ &oword); \ EFX_SET_OWORD_FIELD(oword, \ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 0); \ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \ &oword); \ } \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_RX_TOEPLITZ_IPV4_HASH(_enp, _insert, _ip, _tcp) \ do { \ efx_oword_t oword; \ \ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 1); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, \ (_ip) ? 1 : 0); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, \ (_tcp) ? 0 : 1); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \ (_insert) ? 1 : 0); \ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_RX_TOEPLITZ_IPV6_HASH(_enp, _ip, _tcp, _rc) \ do { \ efx_oword_t oword; \ \ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \ EFX_SET_OWORD_FIELD(oword, \ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1); \ EFX_SET_OWORD_FIELD(oword, \ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, (_ip) ? 1 : 0); \ EFX_SET_OWORD_FIELD(oword, \ FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS, (_tcp) ? 
0 : 1); \ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \ \ (_rc) = 0; \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #if EFSYS_OPT_RX_SCALE static __checkReturn efx_rc_t siena_rx_scale_mode_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert) { efx_rc_t rc; if (rss_context != EFX_RSS_CONTEXT_DEFAULT) { rc = EINVAL; goto fail1; } switch (alg) { case EFX_RX_HASHALG_LFSR: EFX_RX_LFSR_HASH(enp, insert); break; case EFX_RX_HASHALG_TOEPLITZ: EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert, type & EFX_RX_HASH_IPV4, type & EFX_RX_HASH_TCPIPV4); EFX_RX_TOEPLITZ_IPV6_HASH(enp, type & EFX_RX_HASH_IPV6, type & EFX_RX_HASH_TCPIPV6, rc); if (rc != 0) goto fail2; break; default: rc = EINVAL; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); EFX_RX_LFSR_HASH(enp, B_FALSE); return (rc); } #endif #if EFSYS_OPT_RX_SCALE static __checkReturn efx_rc_t siena_rx_scale_key_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) uint8_t *key, __in size_t n) { efx_oword_t oword; unsigned int byte; unsigned int offset; efx_rc_t rc; if (rss_context != EFX_RSS_CONTEXT_DEFAULT) { rc = EINVAL; goto fail1; } byte = 0; /* Write Toeplitz IPv4 hash key */ EFX_ZERO_OWORD(oword); for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8; offset > 0 && byte < n; --offset) oword.eo_u8[offset - 1] = key[byte++]; EFX_BAR_WRITEO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword); byte = 0; /* Verify Toeplitz IPv4 hash key */ EFX_BAR_READO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword); for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8; offset > 0 && byte < n; --offset) { if (oword.eo_u8[offset - 1] != key[byte++]) { rc = EFAULT; goto fail2; } } if ((enp->en_features & EFX_FEATURE_IPV6) == 0) goto done; byte = 0; /* Write Toeplitz IPv6 hash key 3 */ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8; offset > 0 && byte < n; --offset) oword.eo_u8[offset - 1] = key[byte++]; EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword); /* Write Toeplitz IPv6 hash key 2 */ EFX_ZERO_OWORD(oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8; offset > 0 && byte < n; --offset) oword.eo_u8[offset - 1] = key[byte++]; EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword); /* Write Toeplitz IPv6 hash key 1 */ EFX_ZERO_OWORD(oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8; offset > 0 && byte < n; --offset) oword.eo_u8[offset - 1] = key[byte++]; EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword); byte = 0; /* Verify Toeplitz IPv6 hash key 3 */ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8; offset > 0 && byte < n; --offset) { if (oword.eo_u8[offset - 1] != key[byte++]) { rc = EFAULT; goto fail3; } } /* Verify Toeplitz IPv6 hash key 2 */ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8; offset > 0 && byte < n; --offset) { if (oword.eo_u8[offset - 1] != key[byte++]) { rc = EFAULT; goto fail4; } } /* Verify Toeplitz IPv6 hash key 1 */ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8; offset > 0 && byte < n; --offset) { if (oword.eo_u8[offset - 1] 
!= key[byte++]) { rc = EFAULT; goto fail5; } } done: return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif #if EFSYS_OPT_RX_SCALE static __checkReturn efx_rc_t siena_rx_scale_tbl_set( __in efx_nic_t *enp, __in uint32_t rss_context, __in_ecount(n) unsigned int *table, __in size_t n) { efx_oword_t oword; int index; efx_rc_t rc; EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS); EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH)); if (rss_context != EFX_RSS_CONTEXT_DEFAULT) { rc = EINVAL; goto fail1; } if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) { rc = EINVAL; goto fail2; } for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) { uint32_t byte; /* Calculate the entry to place in the table */ byte = (n > 0) ? (uint32_t)table[index % n] : 0; EFSYS_PROBE2(table, int, index, uint32_t, byte); EFX_POPULATE_OWORD_1(oword, FRF_BZ_IT_QUEUE, byte); /* Write the table */ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL, index, &oword, B_TRUE); } for (index = FR_BZ_RX_INDIRECTION_TBL_ROWS - 1; index >= 0; --index) { uint32_t byte; /* Recompute the entry that should have been written */ byte = (n > 0) ? (uint32_t)table[index % n] : 0; /* Read the table */ EFX_BAR_TBL_READO(enp, FR_BZ_RX_INDIRECTION_TBL, index, &oword, B_TRUE); /* Verify the entry */ if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) { rc = EFAULT; goto fail3; } } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* * Falcon/Siena pseudo-header * -------------------------- * * Receive packets are prefixed by an optional 16 byte pseudo-header. * The pseudo-header is a byte array of one of the forms: * * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.TT.TT.TT.TT * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.LL.LL * * where: * TT.TT.TT.TT Toeplitz hash (32-bit big-endian) * LL.LL LFSR hash (16-bit big-endian) */ #if EFSYS_OPT_RX_SCALE static __checkReturn uint32_t siena_rx_prefix_hash( __in efx_nic_t *enp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer) { _NOTE(ARGUNUSED(enp)) switch (func) { case EFX_RX_HASHALG_TOEPLITZ: return ((buffer[12] << 24) | (buffer[13] << 16) | (buffer[14] << 8) | buffer[15]); case EFX_RX_HASHALG_LFSR: return ((buffer[14] << 8) | buffer[15]); default: EFSYS_ASSERT(0); return (0); } } #endif /* EFSYS_OPT_RX_SCALE */ static __checkReturn efx_rc_t siena_rx_prefix_pktlen( __in efx_nic_t *enp, __in uint8_t *buffer, __out uint16_t *lengthp) { _NOTE(ARGUNUSED(enp, buffer, lengthp)) /* Not supported by Falcon/Siena hardware */ EFSYS_ASSERT(0); return (ENOTSUP); } -static void +static void siena_rx_qpost( - __in efx_rxq_t *erp, - __in_ecount(n) efsys_dma_addr_t *addrp, - __in size_t size, - __in unsigned int n, - __in unsigned int completed, - __in unsigned int added) + __in efx_rxq_t *erp, + __in_ecount(ndescs) efsys_dma_addr_t *addrp, + __in size_t size, + __in unsigned int ndescs, + __in unsigned int completed, + __in unsigned int added) { efx_qword_t qword; unsigned int i; unsigned int offset; unsigned int id; /* The client driver must not overfill the queue */ - EFSYS_ASSERT3U(added - completed + n, <=, + EFSYS_ASSERT3U(added - completed + ndescs, <=, EFX_RXQ_LIMIT(erp->er_mask + 1)); id = added & (erp->er_mask); - for (i = 0; i < n; i++) { + for (i = 0; i < ndescs; i++) { EFSYS_PROBE4(rx_post, unsigned int, erp->er_index, unsigned int, id, efsys_dma_addr_t,
addrp[i], size_t, size); EFX_POPULATE_QWORD_3(qword, FSF_AZ_RX_KER_BUF_SIZE, (uint32_t)(size), FSF_AZ_RX_KER_BUF_ADDR_DW0, (uint32_t)(addrp[i] & 0xffffffff), FSF_AZ_RX_KER_BUF_ADDR_DW1, (uint32_t)(addrp[i] >> 32)); offset = id * sizeof (efx_qword_t); EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword); id = (id + 1) & (erp->er_mask); } } static void siena_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp) { efx_nic_t *enp = erp->er_enp; unsigned int pushed = *pushedp; uint32_t wptr; efx_oword_t oword; efx_dword_t dword; /* All descriptors are pushed */ *pushedp = added; /* Push the populated descriptors out */ wptr = added & erp->er_mask; EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DESC_WPTR, wptr); /* Only write the third DWORD */ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3)); /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1, wptr, pushed & erp->er_mask); EFSYS_PIO_WRITE_BARRIER(); EFX_BAR_TBL_WRITED3(enp, FR_BZ_RX_DESC_UPD_REGP0, erp->er_index, &dword, B_FALSE); } #if EFSYS_OPT_RX_PACKED_STREAM static void siena_rx_qpush_ps_credits( __in efx_rxq_t *erp) { /* Not supported by Siena hardware */ EFSYS_ASSERT(0); } static uint8_t * siena_rx_qps_packet_info( __in efx_rxq_t *erp, __in uint8_t *buffer, __in uint32_t buffer_length, __in uint32_t current_offset, __out uint16_t *lengthp, __out uint32_t *next_offsetp, __out uint32_t *timestamp) { /* Not supported by Siena hardware */ EFSYS_ASSERT(0); return (NULL); } #endif /* EFSYS_OPT_RX_PACKED_STREAM */ static __checkReturn efx_rc_t siena_rx_qflush( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_oword_t oword; uint32_t label; label = erp->er_index; /* Flush the queue */ EFX_POPULATE_OWORD_2(oword, FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, FRF_AZ_RX_FLUSH_DESCQ, label); EFX_BAR_WRITEO(enp, FR_AZ_RX_FLUSH_DESCQ_REG, &oword); return (0); } static void siena_rx_qenable( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_oword_t oword; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); EFX_BAR_TBL_READO(enp, FR_AZ_RX_DESC_PTR_TBL, erp->er_index, &oword, B_TRUE); EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DC_HW_RPTR, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_HW_RPTR, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_EN, 1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL, erp->er_index, &oword, B_TRUE); } static __checkReturn efx_rc_t siena_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, + __in uint32_t type_data, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, + __in unsigned int flags, __in efx_evq_t *eep, __in efx_rxq_t *erp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_oword_t oword; uint32_t size; - boolean_t jumbo; + boolean_t jumbo = B_FALSE; efx_rc_t rc; _NOTE(ARGUNUSED(esmp)) + _NOTE(ARGUNUSED(type_data)) EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH)); EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS); EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit); EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS)); EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS)); - if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) { + if (!ISP2(ndescs) || + (ndescs < EFX_RXQ_MINNDESCS) || (ndescs > EFX_RXQ_MAXNDESCS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_rxq_limit) { rc = EINVAL; goto fail2; } for (size = 0; (1 << size) <= (EFX_RXQ_MAXNDESCS / EFX_RXQ_MINNDESCS); size++) - if ((1 << size) == 
(int)(n / EFX_RXQ_MINNDESCS)) + if ((1 << size) == (int)(ndescs / EFX_RXQ_MINNDESCS)) break; if (id + (1 << size) >= encp->enc_buftbl_limit) { rc = EINVAL; goto fail3; } switch (type) { case EFX_RXQ_TYPE_DEFAULT: - jumbo = B_FALSE; break; -#if EFSYS_OPT_RX_SCATTER - case EFX_RXQ_TYPE_SCATTER: - if (enp->en_family < EFX_FAMILY_SIENA) { - rc = EINVAL; - goto fail4; - } - jumbo = B_TRUE; - break; -#endif /* EFSYS_OPT_RX_SCATTER */ - default: rc = EINVAL; goto fail4; } + if (flags & EFX_RXQ_FLAG_SCATTER) { +#if EFSYS_OPT_RX_SCATTER + jumbo = B_TRUE; +#else + rc = EINVAL; + goto fail5; +#endif /* EFSYS_OPT_RX_SCATTER */ + } + /* Set up the new descriptor queue */ EFX_POPULATE_OWORD_7(oword, FRF_AZ_RX_DESCQ_BUF_BASE_ID, id, FRF_AZ_RX_DESCQ_EVQ_ID, eep->ee_index, FRF_AZ_RX_DESCQ_OWNER_ID, 0, FRF_AZ_RX_DESCQ_LABEL, label, FRF_AZ_RX_DESCQ_SIZE, size, FRF_AZ_RX_DESCQ_TYPE, 0, FRF_AZ_RX_DESCQ_JUMBO, jumbo); EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL, erp->er_index, &oword, B_TRUE); return (0); +#if !EFSYS_OPT_RX_SCATTER +fail5: + EFSYS_PROBE(fail5); +#endif fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static void siena_rx_qdestroy( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_oword_t oword; EFSYS_ASSERT(enp->en_rx_qcount != 0); --enp->en_rx_qcount; /* Purge descriptor queue */ EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL, erp->er_index, &oword, B_TRUE); /* Free the RXQ object */ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp); } static void siena_rx_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } #endif /* EFSYS_OPT_SIENA */ Index: projects/clang700-import/sys/dev/sfxge/common/efx_tx.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_tx.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_tx.c (revision 340918) @@ -1,1112 +1,1112 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_QSTATS #define EFX_TX_QSTAT_INCR(_etp, _stat) \ do { \ (_etp)->et_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_TX_QSTAT_INCR(_etp, _stat) #endif #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t siena_tx_init( __in efx_nic_t *enp); static void siena_tx_fini( __in efx_nic_t *enp); static __checkReturn efx_rc_t siena_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __in efx_txq_t *etp, __out unsigned int *addedp); static void siena_tx_qdestroy( __in efx_txq_t *etp); -static __checkReturn efx_rc_t +static __checkReturn efx_rc_t siena_tx_qpost( - __in efx_txq_t *etp, - __in_ecount(n) efx_buffer_t *eb, - __in unsigned int n, - __in unsigned int completed, - __inout unsigned int *addedp); + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_buffer_t *eb, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp); static void siena_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed); static __checkReturn efx_rc_t siena_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns); static __checkReturn efx_rc_t siena_tx_qflush( __in efx_txq_t *etp); static void siena_tx_qenable( __in efx_txq_t *etp); - __checkReturn efx_rc_t + __checkReturn efx_rc_t siena_tx_qdesc_post( - __in efx_txq_t *etp, - __in_ecount(n) efx_desc_t *ed, - __in unsigned int n, - __in unsigned int completed, - __inout unsigned int *addedp); + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_desc_t *ed, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp); void siena_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp); #if EFSYS_OPT_QSTATS static void siena_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat); #endif #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA static const efx_tx_ops_t __efx_tx_siena_ops = { siena_tx_init, /* etxo_init */ siena_tx_fini, /* etxo_fini */ siena_tx_qcreate, /* etxo_qcreate */ siena_tx_qdestroy, /* etxo_qdestroy */ siena_tx_qpost, /* etxo_qpost */ siena_tx_qpush, /* etxo_qpush */ siena_tx_qpace, /* etxo_qpace */ siena_tx_qflush, /* etxo_qflush */ siena_tx_qenable, /* etxo_qenable */ NULL, /* etxo_qpio_enable */ NULL, /* etxo_qpio_disable */ NULL, /* etxo_qpio_write */ NULL, /* etxo_qpio_post */ siena_tx_qdesc_post, /* etxo_qdesc_post */ siena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */ NULL, /* etxo_qdesc_tso_create */ NULL, /* etxo_qdesc_tso2_create */ NULL, /* etxo_qdesc_vlantci_create */ #if EFSYS_OPT_QSTATS siena_tx_qstats_update, /* etxo_qstats_update */ #endif }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON static const efx_tx_ops_t __efx_tx_hunt_ops = { ef10_tx_init, /* etxo_init */ ef10_tx_fini, /* etxo_fini */ ef10_tx_qcreate, /* etxo_qcreate */ ef10_tx_qdestroy, /* etxo_qdestroy */ ef10_tx_qpost, /* etxo_qpost */ ef10_tx_qpush, /* etxo_qpush */ ef10_tx_qpace, /* etxo_qpace */ ef10_tx_qflush, /* etxo_qflush */ ef10_tx_qenable, /* etxo_qenable */ ef10_tx_qpio_enable, /* etxo_qpio_enable */ 
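	/*
	 * The etxo_* members form a per-family method table.  Huntington
	 * and Medford share the EF10 implementations, while the Siena
	 * table above leaves the PIO entries NULL; the efx_tx_qpio_*()
	 * wrappers check for NULL before dispatching.
	 */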
ef10_tx_qpio_disable, /* etxo_qpio_disable */ ef10_tx_qpio_write, /* etxo_qpio_write */ ef10_tx_qpio_post, /* etxo_qpio_post */ ef10_tx_qdesc_post, /* etxo_qdesc_post */ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */ ef10_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */ #if EFSYS_OPT_QSTATS ef10_tx_qstats_update, /* etxo_qstats_update */ #endif }; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD static const efx_tx_ops_t __efx_tx_medford_ops = { ef10_tx_init, /* etxo_init */ ef10_tx_fini, /* etxo_fini */ ef10_tx_qcreate, /* etxo_qcreate */ ef10_tx_qdestroy, /* etxo_qdestroy */ ef10_tx_qpost, /* etxo_qpost */ ef10_tx_qpush, /* etxo_qpush */ ef10_tx_qpace, /* etxo_qpace */ ef10_tx_qflush, /* etxo_qflush */ ef10_tx_qenable, /* etxo_qenable */ ef10_tx_qpio_enable, /* etxo_qpio_enable */ ef10_tx_qpio_disable, /* etxo_qpio_disable */ ef10_tx_qpio_write, /* etxo_qpio_write */ ef10_tx_qpio_post, /* etxo_qpio_post */ ef10_tx_qdesc_post, /* etxo_qdesc_post */ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */ NULL, /* etxo_qdesc_tso_create */ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */ #if EFSYS_OPT_QSTATS ef10_tx_qstats_update, /* etxo_qstats_update */ #endif }; #endif /* EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_tx_init( __in efx_nic_t *enp) { const efx_tx_ops_t *etxop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (!(enp->en_mod_flags & EFX_MOD_EV)) { rc = EINVAL; goto fail1; } if (enp->en_mod_flags & EFX_MOD_TX) { rc = EINVAL; goto fail2; } switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: etxop = &__efx_tx_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: etxop = &__efx_tx_hunt_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: etxop = &__efx_tx_medford_ops; break; #endif /* EFSYS_OPT_MEDFORD */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail3; } EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0); if ((rc = etxop->etxo_init(enp)) != 0) goto fail4; enp->en_etxop = etxop; enp->en_mod_flags |= EFX_MOD_TX; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_etxop = NULL; enp->en_mod_flags &= ~EFX_MOD_TX; return (rc); } void efx_tx_fini( __in efx_nic_t *enp) { const efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX); EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0); etxop->etxo_fini(enp); enp->en_etxop = NULL; enp->en_mod_flags &= ~EFX_MOD_TX; } __checkReturn efx_rc_t efx_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __deref_out efx_txq_t **etpp, __out unsigned int *addedp) { const efx_tx_ops_t *etxop = enp->en_etxop; - efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_txq_t *etp; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX); - EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <, encp->enc_txq_limit); + EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <, + enp->en_nic_cfg.enc_txq_limit); /* Allocate an TXQ 
object */ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_txq_t), etp); if (etp == NULL) { rc = ENOMEM; goto fail1; } etp->et_magic = EFX_TXQ_MAGIC; etp->et_enp = enp; etp->et_index = index; - etp->et_mask = n - 1; + etp->et_mask = ndescs - 1; etp->et_esmp = esmp; /* Initial descriptor index may be modified by etxo_qcreate */ *addedp = 0; if ((rc = etxop->etxo_qcreate(enp, index, label, esmp, - n, id, flags, eep, etp, addedp)) != 0) + ndescs, id, flags, eep, etp, addedp)) != 0) goto fail2; enp->en_tx_qcount++; *etpp = etp; return (0); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_tx_qdestroy( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT(enp->en_tx_qcount != 0); --enp->en_tx_qcount; etxop->etxo_qdestroy(etp); /* Free the TXQ object */ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp); } - __checkReturn efx_rc_t + __checkReturn efx_rc_t efx_tx_qpost( - __in efx_txq_t *etp, - __in_ecount(n) efx_buffer_t *eb, - __in unsigned int n, - __in unsigned int completed, - __inout unsigned int *addedp) + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_buffer_t *eb, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); - if ((rc = etxop->etxo_qpost(etp, eb, - n, completed, addedp)) != 0) + if ((rc = etxop->etxo_qpost(etp, eb, ndescs, completed, addedp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); etxop->etxo_qpush(etp, added, pushed); } __checkReturn efx_rc_t efx_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if ((rc = etxop->etxo_qpace(etp, ns)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_tx_qflush( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if ((rc = etxop->etxo_qflush(etp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_tx_qenable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); etxop->etxo_qenable(etp); } __checkReturn efx_rc_t efx_tx_qpio_enable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if (~enp->en_features & EFX_FEATURE_PIO_BUFFERS) { rc = ENOTSUP; goto fail1; } if (etxop->etxo_qpio_enable == NULL) { rc = ENOTSUP; goto fail2; } if ((rc = etxop->etxo_qpio_enable(etp)) != 0) goto fail3; return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_tx_qpio_disable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); 
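	/*
	 * PIO is an optional per-family capability: families without it
	 * (e.g. Siena, whose ops table has a NULL etxo_qpio_disable)
	 * simply skip the call rather than fail.
	 */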
if (etxop->etxo_qpio_disable != NULL) etxop->etxo_qpio_disable(etp); } __checkReturn efx_rc_t efx_tx_qpio_write( __in efx_txq_t *etp, __in_ecount(buf_length) uint8_t *buffer, __in size_t buf_length, __in size_t pio_buf_offset) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if (etxop->etxo_qpio_write != NULL) { if ((rc = etxop->etxo_qpio_write(etp, buffer, buf_length, pio_buf_offset)) != 0) goto fail1; return (0); } return (ENOTSUP); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_tx_qpio_post( __in efx_txq_t *etp, __in size_t pkt_length, __in unsigned int completed, __inout unsigned int *addedp) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if (etxop->etxo_qpio_post != NULL) { if ((rc = etxop->etxo_qpio_post(etp, pkt_length, completed, addedp)) != 0) goto fail1; return (0); } return (ENOTSUP); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn efx_rc_t + __checkReturn efx_rc_t efx_tx_qdesc_post( - __in efx_txq_t *etp, - __in_ecount(n) efx_desc_t *ed, - __in unsigned int n, - __in unsigned int completed, - __inout unsigned int *addedp) + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_desc_t *ed, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; efx_rc_t rc; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if ((rc = etxop->etxo_qdesc_post(etp, ed, - n, completed, addedp)) != 0) + ndescs, completed, addedp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT(etxop->etxo_qdesc_dma_create != NULL); etxop->etxo_qdesc_dma_create(etp, addr, size, eop, edp); } void efx_tx_qdesc_tso_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint8_t tcp_flags, __out efx_desc_t *edp) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT(etxop->etxo_qdesc_tso_create != NULL); etxop->etxo_qdesc_tso_create(etp, ipv4_id, tcp_seq, tcp_flags, edp); } void efx_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint16_t mss, __out_ecount(count) efx_desc_t *edp, __in int count) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT(etxop->etxo_qdesc_tso2_create != NULL); etxop->etxo_qdesc_tso2_create(etp, ipv4_id, tcp_seq, mss, edp, count); } void efx_tx_qdesc_vlantci_create( __in efx_txq_t *etp, __in uint16_t tci, __out efx_desc_t *edp) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT(etxop->etxo_qdesc_vlantci_create != NULL); etxop->etxo_qdesc_vlantci_create(etp, tci, edp); } #if EFSYS_OPT_QSTATS void efx_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat) { efx_nic_t *enp = etp->et_enp; const efx_tx_ops_t *etxop = enp->en_etxop; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); 
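	/*
	 * Queue statistics accumulate in etp->et_stat[]; the family method
	 * folds them into the caller's array and zeroes the per-queue
	 * counters (see siena_tx_qstats_update() below).
	 */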
etxop->etxo_qstats_update(etp, stat); } #endif #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t siena_tx_init( __in efx_nic_t *enp) { efx_oword_t oword; /* * Disable the timer-based TX DMA backoff and allow TX DMA to be * controlled by the RX FIFO fill level (although always allow a * minimal trickle). */ EFX_BAR_READO(enp, FR_AZ_TX_RESERVED_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER, 0xfe); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER_EN, 1); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_ONE_PKT_PER_Q, 1); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PUSH_EN, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DIS_NON_IP_EV, 1); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_THRESHOLD, 2); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); /* * Filter all packets less than 14 bytes to avoid parsing * errors. */ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); EFX_BAR_WRITEO(enp, FR_AZ_TX_RESERVED_REG, &oword); /* * Do not set TX_NO_EOP_DISC_EN, since it limits packets to 16 * descriptors (which is bad). */ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_NO_EOP_DISC_EN, 0); EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword); return (0); } #define EFX_TX_DESC(_etp, _addr, _size, _eop, _added) \ do { \ unsigned int id; \ size_t offset; \ efx_qword_t qword; \ \ id = (_added)++ & (_etp)->et_mask; \ offset = id * sizeof (efx_qword_t); \ \ EFSYS_PROBE5(tx_post, unsigned int, (_etp)->et_index, \ unsigned int, id, efsys_dma_addr_t, (_addr), \ size_t, (_size), boolean_t, (_eop)); \ \ EFX_POPULATE_QWORD_4(qword, \ FSF_AZ_TX_KER_CONT, (_eop) ? 0 : 1, \ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)(_size), \ FSF_AZ_TX_KER_BUF_ADDR_DW0, \ (uint32_t)((_addr) & 0xffffffff), \ FSF_AZ_TX_KER_BUF_ADDR_DW1, \ (uint32_t)((_addr) >> 32)); \ EFSYS_MEM_WRITEQ((_etp)->et_esmp, offset, &qword); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) -static __checkReturn efx_rc_t +static __checkReturn efx_rc_t siena_tx_qpost( - __in efx_txq_t *etp, - __in_ecount(n) efx_buffer_t *eb, - __in unsigned int n, - __in unsigned int completed, - __inout unsigned int *addedp) + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_buffer_t *eb, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) { unsigned int added = *addedp; unsigned int i; int rc = ENOSPC; - if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) + if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) goto fail1; - for (i = 0; i < n; i++) { + for (i = 0; i < ndescs; i++) { efx_buffer_t *ebp = &eb[i]; efsys_dma_addr_t start = ebp->eb_addr; size_t size = ebp->eb_size; efsys_dma_addr_t end = start + size; /* * Fragments must not span 4k boundaries. * Here it is a stricter requirement than the maximum length. 
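 * The assertion below expresses this: rounding (start + 1) up to the
 * DMA descriptor boundary (enc_tx_dma_desc_boundary, 4KB here) must
 * reach at least 'end', i.e. the fragment [start, end) fits inside a
 * single boundary-aligned window.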
*/ EFSYS_ASSERT(P2ROUNDUP(start + 1, etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= end); EFX_TX_DESC(etp, start, size, ebp->eb_eop, added); } EFX_TX_QSTAT_INCR(etp, TX_POST); *addedp = added; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static void siena_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed) { efx_nic_t *enp = etp->et_enp; uint32_t wptr; efx_dword_t dword; efx_oword_t oword; /* Push the populated descriptors out */ wptr = added & etp->et_mask; EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DESC_WPTR, wptr); /* Only write the third DWORD */ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3)); /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, pushed & etp->et_mask); EFSYS_PIO_WRITE_BARRIER(); EFX_BAR_TBL_WRITED3(enp, FR_BZ_TX_DESC_UPD_REGP0, etp->et_index, &dword, B_FALSE); } #define EFX_MAX_PACE_VALUE 20 #define EFX_TX_PACE_CLOCK_BASE 104 static __checkReturn efx_rc_t siena_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns) { efx_nic_t *enp = etp->et_enp; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_oword_t oword; unsigned int pace_val; unsigned int timer_period; efx_rc_t rc; if (ns == 0) { pace_val = 0; } else { /* * The pace_val to write into the table is s.t * ns <= timer_period * (2 ^ pace_val) */ timer_period = EFX_TX_PACE_CLOCK_BASE / encp->enc_clk_mult; for (pace_val = 1; pace_val <= EFX_MAX_PACE_VALUE; pace_val++) { if ((timer_period << pace_val) >= ns) break; } } if (pace_val > EFX_MAX_PACE_VALUE) { rc = EINVAL; goto fail1; } /* Update the pacing table */ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_PACE, pace_val); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_PACE_TBL, etp->et_index, &oword, B_TRUE); return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t siena_tx_qflush( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_oword_t oword; uint32_t label; efx_tx_qpace(etp, 0); label = etp->et_index; /* Flush the queue */ EFX_POPULATE_OWORD_2(oword, FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, FRF_AZ_TX_FLUSH_DESCQ, label); EFX_BAR_WRITEO(enp, FR_AZ_TX_FLUSH_DESCQ_REG, &oword); return (0); } static void siena_tx_qenable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_oword_t oword; EFX_BAR_TBL_READO(enp, FR_AZ_TX_DESC_PTR_TBL, etp->et_index, &oword, B_TRUE); EFSYS_PROBE5(tx_descq_ptr, unsigned int, etp->et_index, uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_3), uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_2), uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_1), uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_0)); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DC_HW_RPTR, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_HW_RPTR, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_EN, 1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL, etp->et_index, &oword, B_TRUE); } static __checkReturn efx_rc_t siena_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, - __in size_t n, + __in size_t ndescs, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __in efx_txq_t *etp, __out unsigned int *addedp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_oword_t oword; uint32_t size; uint16_t inner_csum; efx_rc_t rc; _NOTE(ARGUNUSED(esmp)) EFX_STATIC_ASSERT(EFX_EV_TX_NLABELS == (1 << FRF_AZ_TX_DESCQ_LABEL_WIDTH)); EFSYS_ASSERT3U(label, <, EFX_EV_TX_NLABELS); EFSYS_ASSERT(ISP2(encp->enc_txq_max_ndescs)); EFX_STATIC_ASSERT(ISP2(EFX_TXQ_MINNDESCS)); - if (!ISP2(n) || (n < 
EFX_TXQ_MINNDESCS) || (n > EFX_EVQ_MAXNEVS)) { + if (!ISP2(ndescs) || + (ndescs < EFX_TXQ_MINNDESCS) || (ndescs > EFX_EVQ_MAXNEVS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_txq_limit) { rc = EINVAL; goto fail2; } for (size = 0; (1 << size) <= (int)(encp->enc_txq_max_ndescs / EFX_TXQ_MINNDESCS); size++) - if ((1 << size) == (int)(n / EFX_TXQ_MINNDESCS)) + if ((1 << size) == (int)(ndescs / EFX_TXQ_MINNDESCS)) break; if (id + (1 << size) >= encp->enc_buftbl_limit) { rc = EINVAL; goto fail3; } inner_csum = EFX_TXQ_CKSUM_INNER_IPV4 | EFX_TXQ_CKSUM_INNER_TCPUDP; if ((flags & inner_csum) != 0) { rc = EINVAL; goto fail4; } /* Set up the new descriptor queue */ *addedp = 0; EFX_POPULATE_OWORD_6(oword, FRF_AZ_TX_DESCQ_BUF_BASE_ID, id, FRF_AZ_TX_DESCQ_EVQ_ID, eep->ee_index, FRF_AZ_TX_DESCQ_OWNER_ID, 0, FRF_AZ_TX_DESCQ_LABEL, label, FRF_AZ_TX_DESCQ_SIZE, size, FRF_AZ_TX_DESCQ_TYPE, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_NON_IP_DROP_DIS, 1); EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_IP_CHKSM_DIS, (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1); EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_TCP_CHKSM_DIS, (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL, etp->et_index, &oword, B_TRUE); return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } - __checkReturn efx_rc_t + __checkReturn efx_rc_t siena_tx_qdesc_post( - __in efx_txq_t *etp, - __in_ecount(n) efx_desc_t *ed, - __in unsigned int n, - __in unsigned int completed, - __inout unsigned int *addedp) + __in efx_txq_t *etp, + __in_ecount(ndescs) efx_desc_t *ed, + __in unsigned int ndescs, + __in unsigned int completed, + __inout unsigned int *addedp) { unsigned int added = *addedp; unsigned int i; efx_rc_t rc; - if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) { + if (added - completed + ndescs > EFX_TXQ_LIMIT(etp->et_mask + 1)) { rc = ENOSPC; goto fail1; } - for (i = 0; i < n; i++) { + for (i = 0; i < ndescs; i++) { efx_desc_t *edp = &ed[i]; unsigned int id; size_t offset; id = added++ & etp->et_mask; offset = id * sizeof (efx_desc_t); EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq); } EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index, - unsigned int, added, unsigned int, n); + unsigned int, added, unsigned int, ndescs); EFX_TX_QSTAT_INCR(etp, TX_POST); *addedp = added; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void siena_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp) { /* * Fragments must not span 4k boundaries. * Here it is a stricter requirement than the maximum length. */ EFSYS_ASSERT(P2ROUNDUP(addr + 1, etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= addr + size); EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index, efsys_dma_addr_t, addr, size_t, size, boolean_t, eop); EFX_POPULATE_QWORD_4(edp->ed_eq, FSF_AZ_TX_KER_CONT, eop ? 
0 : 1, FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)size, FSF_AZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff), FSF_AZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32)); } #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES /* START MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock 2866874ecd7a363b */ static const char * const __efx_tx_qstat_name[] = { "post", "post_pio", }; /* END MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock */ const char * efx_tx_qstat_name( __in efx_nic_t *enp, __in unsigned int id) { _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(id, <, TX_NQSTATS); return (__efx_tx_qstat_name[id]); } #endif /* EFSYS_OPT_NAMES */ #endif /* EFSYS_OPT_QSTATS */ #if EFSYS_OPT_SIENA #if EFSYS_OPT_QSTATS static void siena_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat) { unsigned int id; for (id = 0; id < TX_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, etp->et_stat[id]); etp->et_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ static void siena_tx_qdestroy( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_oword_t oword; /* Purge descriptor queue */ EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL, etp->et_index, &oword, B_TRUE); } static void siena_tx_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } #endif /* EFSYS_OPT_SIENA */ Index: projects/clang700-import/sys/dev/sfxge/common/efx_types.h =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_types.h (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_types.h (revision 340918) @@ -1,1651 +1,1652 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. * * Ackowledgement to Fen Systems Ltd. 
* * $FreeBSD$ */ #ifndef _SYS_EFX_TYPES_H #define _SYS_EFX_TYPES_H #include "efsys.h" #ifdef __cplusplus extern "C" { #endif /* * Bitfield access * * Solarflare NICs make extensive use of bitfields up to 128 bits * wide. Since there is no native 128-bit datatype on most systems, * and since 64-bit datatypes are inefficient on 32-bit systems and * vice versa, we wrap accesses in a way that uses the most efficient * datatype. * * The NICs are PCI devices and therefore little-endian. Since most * of the quantities that we deal with are DMAed to/from host memory, * we define our datatypes (efx_oword_t, efx_qword_t and efx_dword_t) * to be little-endian. * * In the less common case of using PIO for individual register * writes, we construct the little-endian datatype in host memory and * then use non-swapping register access primitives, rather than * constructing a native-endian datatype and relying on implicit * byte-swapping. (We use a similar strategy for register reads.) */ /* * NOTE: Field definitions here and elsewhere are done in terms of a lowest * bit number (LBN) and a width. */ #define EFX_DUMMY_FIELD_LBN 0 #define EFX_DUMMY_FIELD_WIDTH 0 #define EFX_BYTE_0_LBN 0 #define EFX_BYTE_0_WIDTH 8 #define EFX_BYTE_1_LBN 8 #define EFX_BYTE_1_WIDTH 8 #define EFX_BYTE_2_LBN 16 #define EFX_BYTE_2_WIDTH 8 #define EFX_BYTE_3_LBN 24 #define EFX_BYTE_3_WIDTH 8 #define EFX_BYTE_4_LBN 32 #define EFX_BYTE_4_WIDTH 8 #define EFX_BYTE_5_LBN 40 #define EFX_BYTE_5_WIDTH 8 #define EFX_BYTE_6_LBN 48 #define EFX_BYTE_6_WIDTH 8 #define EFX_BYTE_7_LBN 56 #define EFX_BYTE_7_WIDTH 8 #define EFX_WORD_0_LBN 0 #define EFX_WORD_0_WIDTH 16 #define EFX_WORD_1_LBN 16 #define EFX_WORD_1_WIDTH 16 #define EFX_WORD_2_LBN 32 #define EFX_WORD_2_WIDTH 16 #define EFX_WORD_3_LBN 48 #define EFX_WORD_3_WIDTH 16 #define EFX_DWORD_0_LBN 0 #define EFX_DWORD_0_WIDTH 32 #define EFX_DWORD_1_LBN 32 #define EFX_DWORD_1_WIDTH 32 #define EFX_DWORD_2_LBN 64 #define EFX_DWORD_2_WIDTH 32 #define EFX_DWORD_3_LBN 96 #define EFX_DWORD_3_WIDTH 32 -/* There are intentionally no EFX_QWORD_0 or EFX_QWORD_1 field definitions +/* + * There are intentionally no EFX_QWORD_0 or EFX_QWORD_1 field definitions * here as the implementaion of EFX_QWORD_FIELD and EFX_OWORD_FIELD do not * support field widths larger than 32 bits. */ /* Specified attribute (i.e. LBN ow WIDTH) of the specified field */ #define EFX_VAL(_field, _attribute) \ _field ## _ ## _attribute /* Lowest bit number of the specified field */ #define EFX_LOW_BIT(_field) \ EFX_VAL(_field, LBN) /* Width of the specified field */ #define EFX_WIDTH(_field) \ EFX_VAL(_field, WIDTH) /* Highest bit number of the specified field */ #define EFX_HIGH_BIT(_field) \ (EFX_LOW_BIT(_field) + EFX_WIDTH(_field) - 1) /* * 64-bit mask equal in width to the specified field. * * For example, a field with width 5 would have a mask of 0x000000000000001f. */ #define EFX_MASK64(_field) \ ((EFX_WIDTH(_field) == 64) ? ~((uint64_t)0) : \ (((((uint64_t)1) << EFX_WIDTH(_field))) - 1)) /* * 32-bit mask equal in width to the specified field. * * For example, a field with width 5 would have a mask of 0x0000001f. */ #define EFX_MASK32(_field) \ ((EFX_WIDTH(_field) == 32) ? ~((uint32_t)0) : \ (((((uint32_t)1) << EFX_WIDTH(_field))) - 1)) /* * 16-bit mask equal in width to the specified field. * * For example, a field with width 5 would have a mask of 0x001f. */ #define EFX_MASK16(_field) \ ((EFX_WIDTH(_field) == 16) ? 0xffffu : \ (uint16_t)((1 << EFX_WIDTH(_field)) - 1)) /* * 8-bit mask equal in width to the specified field. 
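 * (Unlike the 16-, 32- and 64-bit variants above, no full-width special
 * case is needed here: the shift operand promotes to int, so 1 << 8 is
 * well defined.)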
* * For example, a field with width 5 would have a mask of 0x1f. */ #define EFX_MASK8(_field) \ ((uint8_t)((1 << EFX_WIDTH(_field)) - 1)) #pragma pack(1) /* * A byte (i.e. 8-bit) datatype */ typedef union efx_byte_u { uint8_t eb_u8[1]; } efx_byte_t; /* * A word (i.e. 16-bit) datatype * * This datatype is defined to be little-endian. */ typedef union efx_word_u { efx_byte_t ew_byte[2]; uint16_t ew_u16[1]; uint8_t ew_u8[2]; } efx_word_t; /* * A doubleword (i.e. 32-bit) datatype * * This datatype is defined to be little-endian. */ typedef union efx_dword_u { efx_byte_t ed_byte[4]; efx_word_t ed_word[2]; uint32_t ed_u32[1]; uint16_t ed_u16[2]; uint8_t ed_u8[4]; } efx_dword_t; /* * A quadword (i.e. 64-bit) datatype * * This datatype is defined to be little-endian. */ typedef union efx_qword_u { efx_byte_t eq_byte[8]; efx_word_t eq_word[4]; efx_dword_t eq_dword[2]; #if EFSYS_HAS_UINT64 uint64_t eq_u64[1]; #endif uint32_t eq_u32[2]; uint16_t eq_u16[4]; uint8_t eq_u8[8]; } efx_qword_t; /* * An octword (i.e. 128-bit) datatype * * This datatype is defined to be little-endian. */ typedef union efx_oword_u { efx_byte_t eo_byte[16]; efx_word_t eo_word[8]; efx_dword_t eo_dword[4]; efx_qword_t eo_qword[2]; #if EFSYS_HAS_SSE2_M128 __m128i eo_u128[1]; #endif #if EFSYS_HAS_UINT64 uint64_t eo_u64[2]; #endif uint32_t eo_u32[4]; uint16_t eo_u16[8]; uint8_t eo_u8[16]; } efx_oword_t; #pragma pack() #define __SWAP16(_x) \ ((((_x) & 0xff) << 8) | \ (((_x) >> 8) & 0xff)) #define __SWAP32(_x) \ ((__SWAP16((_x) & 0xffff) << 16) | \ __SWAP16(((_x) >> 16) & 0xffff)) #define __SWAP64(_x) \ ((__SWAP32((_x) & 0xffffffff) << 32) | \ __SWAP32(((_x) >> 32) & 0xffffffff)) #define __NOSWAP16(_x) (_x) #define __NOSWAP32(_x) (_x) #define __NOSWAP64(_x) (_x) #if EFSYS_IS_BIG_ENDIAN #define __CPU_TO_LE_16(_x) ((uint16_t)__SWAP16(_x)) #define __LE_TO_CPU_16(_x) ((uint16_t)__SWAP16(_x)) #define __CPU_TO_BE_16(_x) ((uint16_t)__NOSWAP16(_x)) #define __BE_TO_CPU_16(_x) ((uint16_t)__NOSWAP16(_x)) #define __CPU_TO_LE_32(_x) ((uint32_t)__SWAP32(_x)) #define __LE_TO_CPU_32(_x) ((uint32_t)__SWAP32(_x)) #define __CPU_TO_BE_32(_x) ((uint32_t)__NOSWAP32(_x)) #define __BE_TO_CPU_32(_x) ((uint32_t)__NOSWAP32(_x)) #define __CPU_TO_LE_64(_x) ((uint64_t)__SWAP64(_x)) #define __LE_TO_CPU_64(_x) ((uint64_t)__SWAP64(_x)) #define __CPU_TO_BE_64(_x) ((uint64_t)__NOSWAP64(_x)) #define __BE_TO_CPU_64(_x) ((uint64_t)__NOSWAP64(_x)) #elif EFSYS_IS_LITTLE_ENDIAN #define __CPU_TO_LE_16(_x) ((uint16_t)__NOSWAP16(_x)) #define __LE_TO_CPU_16(_x) ((uint16_t)__NOSWAP16(_x)) #define __CPU_TO_BE_16(_x) ((uint16_t)__SWAP16(_x)) #define __BE_TO_CPU_16(_x) ((uint16_t)__SWAP16(_x)) #define __CPU_TO_LE_32(_x) ((uint32_t)__NOSWAP32(_x)) #define __LE_TO_CPU_32(_x) ((uint32_t)__NOSWAP32(_x)) #define __CPU_TO_BE_32(_x) ((uint32_t)__SWAP32(_x)) #define __BE_TO_CPU_32(_x) ((uint32_t)__SWAP32(_x)) #define __CPU_TO_LE_64(_x) ((uint64_t)__NOSWAP64(_x)) #define __LE_TO_CPU_64(_x) ((uint64_t)__NOSWAP64(_x)) #define __CPU_TO_BE_64(_x) ((uint64_t)__SWAP64(_x)) #define __BE_TO_CPU_64(_x) ((uint64_t)__SWAP64(_x)) #else #error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set" #endif #define __NATIVE_8(_x) (uint8_t)(_x) /* Format string for printing an efx_byte_t */ #define EFX_BYTE_FMT "0x%02x" /* Format string for printing an efx_word_t */ #define EFX_WORD_FMT "0x%04x" /* Format string for printing an efx_dword_t */ #define EFX_DWORD_FMT "0x%08x" /* Format string for printing an efx_qword_t */ #define EFX_QWORD_FMT "0x%08x:%08x" /* Format string for printing an efx_oword_t */ #define 
EFX_OWORD_FMT "0x%08x:%08x:%08x:%08x" /* Parameters for printing an efx_byte_t */ #define EFX_BYTE_VAL(_byte) \ ((unsigned int)__NATIVE_8((_byte).eb_u8[0])) /* Parameters for printing an efx_word_t */ #define EFX_WORD_VAL(_word) \ ((unsigned int)__LE_TO_CPU_16((_word).ew_u16[0])) /* Parameters for printing an efx_dword_t */ #define EFX_DWORD_VAL(_dword) \ ((unsigned int)__LE_TO_CPU_32((_dword).ed_u32[0])) /* Parameters for printing an efx_qword_t */ #define EFX_QWORD_VAL(_qword) \ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[1])), \ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[0])) /* Parameters for printing an efx_oword_t */ #define EFX_OWORD_VAL(_oword) \ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[3])), \ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[2])), \ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[1])), \ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[0])) /* * Stop lint complaining about some shifts. */ #ifdef __lint extern int fix_lint; #define FIX_LINT(_x) (_x + fix_lint) #else #define FIX_LINT(_x) (_x) #endif /* * Extract bit field portion [low,high) from the native-endian element * which contains bits [min,max). * * For example, suppose "element" represents the high 32 bits of a * 64-bit value, and we wish to extract the bits belonging to the bit * field occupying bits 28-45 of this 64-bit value. * * Then EFX_EXTRACT(_element, 32, 63, 28, 45) would give * * (_element) << 4 * * The result will contain the relevant bits filled in in the range * [0,high-low), with garbage in bits [high-low+1,...). */ #define EFX_EXTRACT_NATIVE(_element, _min, _max, _low, _high) \ ((FIX_LINT(_low > _max) || FIX_LINT(_high < _min)) ? \ 0U : \ ((_low > _min) ? \ ((_element) >> (_low - _min)) : \ ((_element) << (_min - _low)))) /* * Extract bit field portion [low,high) from the 64-bit little-endian * element which contains bits [min,max) */ #define EFX_EXTRACT64(_element, _min, _max, _low, _high) \ EFX_EXTRACT_NATIVE(__LE_TO_CPU_64(_element), _min, _max, _low, _high) /* * Extract bit field portion [low,high) from the 32-bit little-endian * element which contains bits [min,max) */ #define EFX_EXTRACT32(_element, _min, _max, _low, _high) \ EFX_EXTRACT_NATIVE(__LE_TO_CPU_32(_element), _min, _max, _low, _high) /* * Extract bit field portion [low,high) from the 16-bit little-endian * element which contains bits [min,max) */ #define EFX_EXTRACT16(_element, _min, _max, _low, _high) \ EFX_EXTRACT_NATIVE(__LE_TO_CPU_16(_element), _min, _max, _low, _high) /* * Extract bit field portion [low,high) from the 8-bit * element which contains bits [min,max) */ #define EFX_EXTRACT8(_element, _min, _max, _low, _high) \ EFX_EXTRACT_NATIVE(__NATIVE_8(_element), _min, _max, _low, _high) #define EFX_EXTRACT_OWORD64(_oword, _low, _high) \ (EFX_EXTRACT64((_oword).eo_u64[0], FIX_LINT(0), FIX_LINT(63), \ _low, _high) | \ EFX_EXTRACT64((_oword).eo_u64[1], FIX_LINT(64), FIX_LINT(127), \ _low, _high)) #define EFX_EXTRACT_OWORD32(_oword, _low, _high) \ (EFX_EXTRACT32((_oword).eo_u32[0], FIX_LINT(0), FIX_LINT(31), \ _low, _high) | \ EFX_EXTRACT32((_oword).eo_u32[1], FIX_LINT(32), FIX_LINT(63), \ _low, _high) | \ EFX_EXTRACT32((_oword).eo_u32[2], FIX_LINT(64), FIX_LINT(95), \ _low, _high) | \ EFX_EXTRACT32((_oword).eo_u32[3], FIX_LINT(96), FIX_LINT(127), \ _low, _high)) #define EFX_EXTRACT_QWORD64(_qword, _low, _high) \ (EFX_EXTRACT64((_qword).eq_u64[0], FIX_LINT(0), FIX_LINT(63), \ _low, _high)) #define EFX_EXTRACT_QWORD32(_qword, _low, _high) \ (EFX_EXTRACT32((_qword).eq_u32[0], FIX_LINT(0), FIX_LINT(31), \ _low, _high) | \ 
EFX_EXTRACT32((_qword).eq_u32[1], FIX_LINT(32), FIX_LINT(63), \ _low, _high)) #define EFX_EXTRACT_DWORD(_dword, _low, _high) \ (EFX_EXTRACT32((_dword).ed_u32[0], FIX_LINT(0), FIX_LINT(31), \ _low, _high)) #define EFX_EXTRACT_WORD(_word, _low, _high) \ (EFX_EXTRACT16((_word).ew_u16[0], FIX_LINT(0), FIX_LINT(15), \ _low, _high)) #define EFX_EXTRACT_BYTE(_byte, _low, _high) \ (EFX_EXTRACT8((_byte).eb_u8[0], FIX_LINT(0), FIX_LINT(7), \ _low, _high)) #define EFX_OWORD_FIELD64(_oword, _field) \ ((uint32_t)EFX_EXTRACT_OWORD64(_oword, EFX_LOW_BIT(_field), \ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field)) #define EFX_OWORD_FIELD32(_oword, _field) \ (EFX_EXTRACT_OWORD32(_oword, EFX_LOW_BIT(_field), \ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field)) #define EFX_QWORD_FIELD64(_qword, _field) \ ((uint32_t)EFX_EXTRACT_QWORD64(_qword, EFX_LOW_BIT(_field), \ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field)) #define EFX_QWORD_FIELD32(_qword, _field) \ (EFX_EXTRACT_QWORD32(_qword, EFX_LOW_BIT(_field), \ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field)) #define EFX_DWORD_FIELD(_dword, _field) \ (EFX_EXTRACT_DWORD(_dword, EFX_LOW_BIT(_field), \ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field)) #define EFX_WORD_FIELD(_word, _field) \ (EFX_EXTRACT_WORD(_word, EFX_LOW_BIT(_field), \ EFX_HIGH_BIT(_field)) & EFX_MASK16(_field)) #define EFX_BYTE_FIELD(_byte, _field) \ (EFX_EXTRACT_BYTE(_byte, EFX_LOW_BIT(_field), \ EFX_HIGH_BIT(_field)) & EFX_MASK8(_field)) #define EFX_OWORD_IS_EQUAL64(_oword_a, _oword_b) \ ((_oword_a).eo_u64[0] == (_oword_b).eo_u64[0] && \ (_oword_a).eo_u64[1] == (_oword_b).eo_u64[1]) #define EFX_OWORD_IS_EQUAL32(_oword_a, _oword_b) \ ((_oword_a).eo_u32[0] == (_oword_b).eo_u32[0] && \ (_oword_a).eo_u32[1] == (_oword_b).eo_u32[1] && \ (_oword_a).eo_u32[2] == (_oword_b).eo_u32[2] && \ (_oword_a).eo_u32[3] == (_oword_b).eo_u32[3]) #define EFX_QWORD_IS_EQUAL64(_qword_a, _qword_b) \ ((_qword_a).eq_u64[0] == (_qword_b).eq_u64[0]) #define EFX_QWORD_IS_EQUAL32(_qword_a, _qword_b) \ ((_qword_a).eq_u32[0] == (_qword_b).eq_u32[0] && \ (_qword_a).eq_u32[1] == (_qword_b).eq_u32[1]) #define EFX_DWORD_IS_EQUAL(_dword_a, _dword_b) \ ((_dword_a).ed_u32[0] == (_dword_b).ed_u32[0]) #define EFX_WORD_IS_EQUAL(_word_a, _word_b) \ ((_word_a).ew_u16[0] == (_word_b).ew_u16[0]) #define EFX_BYTE_IS_EQUAL(_byte_a, _byte_b) \ ((_byte_a).eb_u8[0] == (_byte_b).eb_u8[0]) #define EFX_OWORD_IS_ZERO64(_oword) \ (((_oword).eo_u64[0] | \ (_oword).eo_u64[1]) == 0) #define EFX_OWORD_IS_ZERO32(_oword) \ (((_oword).eo_u32[0] | \ (_oword).eo_u32[1] | \ (_oword).eo_u32[2] | \ (_oword).eo_u32[3]) == 0) #define EFX_QWORD_IS_ZERO64(_qword) \ (((_qword).eq_u64[0]) == 0) #define EFX_QWORD_IS_ZERO32(_qword) \ (((_qword).eq_u32[0] | \ (_qword).eq_u32[1]) == 0) #define EFX_DWORD_IS_ZERO(_dword) \ (((_dword).ed_u32[0]) == 0) #define EFX_WORD_IS_ZERO(_word) \ (((_word).ew_u16[0]) == 0) #define EFX_BYTE_IS_ZERO(_byte) \ (((_byte).eb_u8[0]) == 0) #define EFX_OWORD_IS_SET64(_oword) \ (((_oword).eo_u64[0] & \ (_oword).eo_u64[1]) == ~((uint64_t)0)) #define EFX_OWORD_IS_SET32(_oword) \ (((_oword).eo_u32[0] & \ (_oword).eo_u32[1] & \ (_oword).eo_u32[2] & \ (_oword).eo_u32[3]) == ~((uint32_t)0)) #define EFX_QWORD_IS_SET64(_qword) \ (((_qword).eq_u64[0]) == ~((uint64_t)0)) #define EFX_QWORD_IS_SET32(_qword) \ (((_qword).eq_u32[0] & \ (_qword).eq_u32[1]) == ~((uint32_t)0)) #define EFX_DWORD_IS_SET(_dword) \ ((_dword).ed_u32[0] == ~((uint32_t)0)) #define EFX_WORD_IS_SET(_word) \ ((_word).ew_u16[0] == ~((uint16_t)0)) #define EFX_BYTE_IS_SET(_byte) \ ((_byte).eb_u8[0] == ~((uint8_t)0)) /* 
* Construct bit field portion * * Creates the portion of the bit field [low,high) that lies within * the range [min,max). */ #define EFX_INSERT_NATIVE64(_min, _max, _low, _high, _value) \ (((_low > _max) || (_high < _min)) ? \ 0U : \ ((_low > _min) ? \ (((uint64_t)(_value)) << (_low - _min)) : \ (((uint64_t)(_value)) >> (_min - _low)))) #define EFX_INSERT_NATIVE32(_min, _max, _low, _high, _value) \ (((_low > _max) || (_high < _min)) ? \ 0U : \ ((_low > _min) ? \ (((uint32_t)(_value)) << (_low - _min)) : \ (((uint32_t)(_value)) >> (_min - _low)))) #define EFX_INSERT_NATIVE16(_min, _max, _low, _high, _value) \ (((_low > _max) || (_high < _min)) ? \ 0U : \ (uint16_t)((_low > _min) ? \ ((_value) << (_low - _min)) : \ ((_value) >> (_min - _low)))) #define EFX_INSERT_NATIVE8(_min, _max, _low, _high, _value) \ (((_low > _max) || (_high < _min)) ? \ 0U : \ (uint8_t)((_low > _min) ? \ ((_value) << (_low - _min)) : \ ((_value) >> (_min - _low)))) /* * Construct bit field portion * * Creates the portion of the named bit field that lies within the * range [min,max). */ #define EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value) \ EFX_INSERT_NATIVE64(_min, _max, EFX_LOW_BIT(_field), \ EFX_HIGH_BIT(_field), _value) #define EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value) \ EFX_INSERT_NATIVE32(_min, _max, EFX_LOW_BIT(_field), \ EFX_HIGH_BIT(_field), _value) #define EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value) \ EFX_INSERT_NATIVE16(_min, _max, EFX_LOW_BIT(_field), \ EFX_HIGH_BIT(_field), _value) #define EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value) \ EFX_INSERT_NATIVE8(_min, _max, EFX_LOW_BIT(_field), \ EFX_HIGH_BIT(_field), _value) /* * Construct bit field * * Creates the portion of the named bit fields that lie within the * range [min,max). 
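 *
 * Each EFX_INSERT_FIELDS<n> below ORs together up to ten such field
 * insertions and converts the result to little-endian.  The shorter
 * EFX_POPULATE_*_<n> forms pad unused slots with EFX_DUMMY_FIELD,
 * whose zero width makes it contribute nothing.  For example, with
 * EFX_WORD_0 (LBN 0) and EFX_WORD_1 (LBN 16),
 * EFX_INSERT_FIELDS32(0, 31, EFX_WORD_0, 0x1234, EFX_WORD_1, 0xabcd, ...)
 * evaluates to __CPU_TO_LE_32(0xabcd1234).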
*/ #define EFX_INSERT_FIELDS64(_min, _max, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9, \ _field10, _value10) \ __CPU_TO_LE_64( \ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field1, _value1) | \ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field2, _value2) | \ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field3, _value3) | \ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field4, _value4) | \ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field5, _value5) | \ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field6, _value6) | \ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field7, _value7) | \ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field8, _value8) | \ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field9, _value9) | \ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field10, _value10)) #define EFX_INSERT_FIELDS32(_min, _max, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9, \ _field10, _value10) \ __CPU_TO_LE_32( \ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field1, _value1) | \ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field2, _value2) | \ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field3, _value3) | \ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field4, _value4) | \ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field5, _value5) | \ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field6, _value6) | \ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field7, _value7) | \ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field8, _value8) | \ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field9, _value9) | \ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field10, _value10)) #define EFX_INSERT_FIELDS16(_min, _max, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9, \ _field10, _value10) \ __CPU_TO_LE_16( \ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field1, _value1) | \ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field2, _value2) | \ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field3, _value3) | \ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field4, _value4) | \ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field5, _value5) | \ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field6, _value6) | \ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field7, _value7) | \ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field8, _value8) | \ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field9, _value9) | \ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field10, _value10)) #define EFX_INSERT_FIELDS8(_min, _max, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9, \ _field10, _value10) \ __NATIVE_8( \ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field1, _value1) | \ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field2, _value2) | \ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field3, _value3) | \ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field4, _value4) | \ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field5, _value5) | \ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field6, _value6) | \ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field7, _value7) | \ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field8, _value8) | \ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field9, _value9) | \ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field10, _value10)) #define EFX_POPULATE_OWORD64(_oword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, 
_field8, _value8, _field9, _value9, \ _field10, _value10) \ do { \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u64[0] = EFX_INSERT_FIELDS64(0, 63, \ _field1, _value1, _field2, _value2, \ _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, \ _field9, _value9, _field10, _value10); \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u64[1] = EFX_INSERT_FIELDS64(64, 127, \ _field1, _value1, _field2, _value2, \ _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, \ _field9, _value9, _field10, _value10); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_POPULATE_OWORD32(_oword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9, \ _field10, _value10) \ do { \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u32[0] = EFX_INSERT_FIELDS32(0, 31, \ _field1, _value1, _field2, _value2, \ _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, \ _field9, _value9, _field10, _value10); \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u32[1] = EFX_INSERT_FIELDS32(32, 63, \ _field1, _value1, _field2, _value2, \ _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, \ _field9, _value9, _field10, _value10); \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u32[2] = EFX_INSERT_FIELDS32(64, 95, \ _field1, _value1, _field2, _value2, \ _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, \ _field9, _value9, _field10, _value10); \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u32[3] = EFX_INSERT_FIELDS32(96, 127, \ _field1, _value1, _field2, _value2, \ _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, \ _field9, _value9, _field10, _value10); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_POPULATE_QWORD64(_qword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9, \ _field10, _value10) \ do { \ _NOTE(CONSTANTCONDITION) \ (_qword).eq_u64[0] = EFX_INSERT_FIELDS64(0, 63, \ _field1, _value1, _field2, _value2, \ _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, \ _field9, _value9, _field10, _value10); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_POPULATE_QWORD32(_qword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9, \ _field10, _value10) \ do { \ _NOTE(CONSTANTCONDITION) \ (_qword).eq_u32[0] = EFX_INSERT_FIELDS32(0, 31, \ _field1, _value1, _field2, _value2, \ _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, \ _field9, _value9, _field10, _value10); \ _NOTE(CONSTANTCONDITION) \ (_qword).eq_u32[1] = EFX_INSERT_FIELDS32(32, 63, \ _field1, _value1, _field2, _value2, \ _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, \ _field9, _value9, _field10, _value10); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_POPULATE_DWORD(_dword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, 
_value7, _field8, _value8, _field9, _value9, \ _field10, _value10) \ do { \ _NOTE(CONSTANTCONDITION) \ (_dword).ed_u32[0] = EFX_INSERT_FIELDS32(0, 31, \ _field1, _value1, _field2, _value2, \ _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, \ _field9, _value9, _field10, _value10); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_POPULATE_WORD(_word, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9, \ _field10, _value10) \ do { \ _NOTE(CONSTANTCONDITION) \ (_word).ew_u16[0] = EFX_INSERT_FIELDS16(0, 15, \ _field1, _value1, _field2, _value2, \ _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, \ _field9, _value9, _field10, _value10); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_POPULATE_BYTE(_byte, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9, \ _field10, _value10) \ do { \ _NOTE(CONSTANTCONDITION) \ (_byte).eb_u8[0] = EFX_INSERT_FIELDS8(0, 7, \ _field1, _value1, _field2, _value2, \ _field3, _value3, _field4, _value4, \ _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, \ _field9, _value9, _field10, _value10); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* Populate an octword field with various numbers of arguments */ #define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD #define EFX_POPULATE_OWORD_9(_oword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9) \ EFX_POPULATE_OWORD_10(_oword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9) #define EFX_POPULATE_OWORD_8(_oword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8) \ EFX_POPULATE_OWORD_9(_oword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8) #define EFX_POPULATE_OWORD_7(_oword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7) \ EFX_POPULATE_OWORD_8(_oword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7) #define EFX_POPULATE_OWORD_6(_oword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6) \ EFX_POPULATE_OWORD_7(_oword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6) #define EFX_POPULATE_OWORD_5(_oword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5) \ EFX_POPULATE_OWORD_6(_oword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5) #define EFX_POPULATE_OWORD_4(_oword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4) \ EFX_POPULATE_OWORD_5(_oword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, 
_field3, _value3, \ _field4, _value4) #define EFX_POPULATE_OWORD_3(_oword, \ _field1, _value1, _field2, _value2, _field3, _value3) \ EFX_POPULATE_OWORD_4(_oword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3) #define EFX_POPULATE_OWORD_2(_oword, \ _field1, _value1, _field2, _value2) \ EFX_POPULATE_OWORD_3(_oword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2) #define EFX_POPULATE_OWORD_1(_oword, \ _field1, _value1) \ EFX_POPULATE_OWORD_2(_oword, EFX_DUMMY_FIELD, 0, \ _field1, _value1) #define EFX_ZERO_OWORD(_oword) \ EFX_POPULATE_OWORD_1(_oword, EFX_DUMMY_FIELD, 0) #define EFX_SET_OWORD(_oword) \ EFX_POPULATE_OWORD_4(_oword, \ EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff, \ EFX_DWORD_2, 0xffffffff, EFX_DWORD_3, 0xffffffff) /* Populate a quadword field with various numbers of arguments */ #define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD #define EFX_POPULATE_QWORD_9(_qword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9) \ EFX_POPULATE_QWORD_10(_qword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9) #define EFX_POPULATE_QWORD_8(_qword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8) \ EFX_POPULATE_QWORD_9(_qword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8) #define EFX_POPULATE_QWORD_7(_qword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7) \ EFX_POPULATE_QWORD_8(_qword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7) #define EFX_POPULATE_QWORD_6(_qword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6) \ EFX_POPULATE_QWORD_7(_qword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6) #define EFX_POPULATE_QWORD_5(_qword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5) \ EFX_POPULATE_QWORD_6(_qword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5) #define EFX_POPULATE_QWORD_4(_qword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4) \ EFX_POPULATE_QWORD_5(_qword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4) #define EFX_POPULATE_QWORD_3(_qword, \ _field1, _value1, _field2, _value2, _field3, _value3) \ EFX_POPULATE_QWORD_4(_qword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3) #define EFX_POPULATE_QWORD_2(_qword, \ _field1, _value1, _field2, _value2) \ EFX_POPULATE_QWORD_3(_qword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2) #define EFX_POPULATE_QWORD_1(_qword, \ _field1, _value1) \ EFX_POPULATE_QWORD_2(_qword, EFX_DUMMY_FIELD, 0, \ _field1, _value1) #define EFX_ZERO_QWORD(_qword) \ EFX_POPULATE_QWORD_1(_qword, EFX_DUMMY_FIELD, 0) #define EFX_SET_QWORD(_qword) \ EFX_POPULATE_QWORD_2(_qword, \ EFX_DWORD_0, 0xffffffff, 
EFX_DWORD_1, 0xffffffff) /* Populate a dword field with various numbers of arguments */ #define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD #define EFX_POPULATE_DWORD_9(_dword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9) \ EFX_POPULATE_DWORD_10(_dword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9) #define EFX_POPULATE_DWORD_8(_dword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8) \ EFX_POPULATE_DWORD_9(_dword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8) #define EFX_POPULATE_DWORD_7(_dword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7) \ EFX_POPULATE_DWORD_8(_dword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7) #define EFX_POPULATE_DWORD_6(_dword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6) \ EFX_POPULATE_DWORD_7(_dword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6) #define EFX_POPULATE_DWORD_5(_dword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5) \ EFX_POPULATE_DWORD_6(_dword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5) #define EFX_POPULATE_DWORD_4(_dword, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4) \ EFX_POPULATE_DWORD_5(_dword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4) #define EFX_POPULATE_DWORD_3(_dword, \ _field1, _value1, _field2, _value2, _field3, _value3) \ EFX_POPULATE_DWORD_4(_dword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3) #define EFX_POPULATE_DWORD_2(_dword, \ _field1, _value1, _field2, _value2) \ EFX_POPULATE_DWORD_3(_dword, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2) #define EFX_POPULATE_DWORD_1(_dword, \ _field1, _value1) \ EFX_POPULATE_DWORD_2(_dword, EFX_DUMMY_FIELD, 0, \ _field1, _value1) #define EFX_ZERO_DWORD(_dword) \ EFX_POPULATE_DWORD_1(_dword, EFX_DUMMY_FIELD, 0) #define EFX_SET_DWORD(_dword) \ EFX_POPULATE_DWORD_1(_dword, \ EFX_DWORD_0, 0xffffffff) /* Populate a word field with various numbers of arguments */ #define EFX_POPULATE_WORD_10 EFX_POPULATE_WORD #define EFX_POPULATE_WORD_9(_word, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9) \ EFX_POPULATE_WORD_10(_word, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9) #define EFX_POPULATE_WORD_8(_word, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8) \ EFX_POPULATE_WORD_9(_word, EFX_DUMMY_FIELD, 
0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8) #define EFX_POPULATE_WORD_7(_word, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7) \ EFX_POPULATE_WORD_8(_word, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7) #define EFX_POPULATE_WORD_6(_word, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6) \ EFX_POPULATE_WORD_7(_word, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6) #define EFX_POPULATE_WORD_5(_word, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5) \ EFX_POPULATE_WORD_6(_word, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5) #define EFX_POPULATE_WORD_4(_word, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4) \ EFX_POPULATE_WORD_5(_word, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4) #define EFX_POPULATE_WORD_3(_word, \ _field1, _value1, _field2, _value2, _field3, _value3) \ EFX_POPULATE_WORD_4(_word, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3) #define EFX_POPULATE_WORD_2(_word, \ _field1, _value1, _field2, _value2) \ EFX_POPULATE_WORD_3(_word, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2) #define EFX_POPULATE_WORD_1(_word, \ _field1, _value1) \ EFX_POPULATE_WORD_2(_word, EFX_DUMMY_FIELD, 0, \ _field1, _value1) #define EFX_ZERO_WORD(_word) \ EFX_POPULATE_WORD_1(_word, EFX_DUMMY_FIELD, 0) #define EFX_SET_WORD(_word) \ EFX_POPULATE_WORD_1(_word, \ EFX_WORD_0, 0xffff) /* Populate a byte field with various numbers of arguments */ #define EFX_POPULATE_BYTE_10 EFX_POPULATE_BYTE #define EFX_POPULATE_BYTE_9(_byte, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9) \ EFX_POPULATE_BYTE_10(_byte, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8, _field9, _value9) #define EFX_POPULATE_BYTE_8(_byte, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8) \ EFX_POPULATE_BYTE_9(_byte, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7, _field8, _value8) #define EFX_POPULATE_BYTE_7(_byte, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7) \ EFX_POPULATE_BYTE_8(_byte, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6, \ _field7, _value7) #define EFX_POPULATE_BYTE_6(_byte, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6) \ EFX_POPULATE_BYTE_7(_byte, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5, _field6, _value6) #define 
EFX_POPULATE_BYTE_5(_byte, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5) \ EFX_POPULATE_BYTE_6(_byte, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4, _field5, _value5) #define EFX_POPULATE_BYTE_4(_byte, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4) \ EFX_POPULATE_BYTE_5(_byte, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3, \ _field4, _value4) #define EFX_POPULATE_BYTE_3(_byte, \ _field1, _value1, _field2, _value2, _field3, _value3) \ EFX_POPULATE_BYTE_4(_byte, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2, _field3, _value3) #define EFX_POPULATE_BYTE_2(_byte, \ _field1, _value1, _field2, _value2) \ EFX_POPULATE_BYTE_3(_byte, EFX_DUMMY_FIELD, 0, \ _field1, _value1, _field2, _value2) #define EFX_POPULATE_BYTE_1(_byte, \ _field1, _value1) \ EFX_POPULATE_BYTE_2(_byte, EFX_DUMMY_FIELD, 0, \ _field1, _value1) #define EFX_ZERO_BYTE(_byte) \ EFX_POPULATE_BYTE_1(_byte, EFX_DUMMY_FIELD, 0) #define EFX_SET_BYTE(_byte) \ EFX_POPULATE_BYTE_1(_byte, \ EFX_BYTE_0, 0xff) /* * Modify a named field within an already-populated structure. Used * for read-modify-write operations. */ #define EFX_INSERT_FIELD64(_min, _max, _field, _value) \ __CPU_TO_LE_64(EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value)) #define EFX_INSERT_FIELD32(_min, _max, _field, _value) \ __CPU_TO_LE_32(EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value)) #define EFX_INSERT_FIELD16(_min, _max, _field, _value) \ __CPU_TO_LE_16(EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value)) #define EFX_INSERT_FIELD8(_min, _max, _field, _value) \ __NATIVE_8(EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value)) #define EFX_INPLACE_MASK64(_min, _max, _field) \ EFX_INSERT_FIELD64(_min, _max, _field, EFX_MASK64(_field)) #define EFX_INPLACE_MASK32(_min, _max, _field) \ EFX_INSERT_FIELD32(_min, _max, _field, EFX_MASK32(_field)) #define EFX_INPLACE_MASK16(_min, _max, _field) \ EFX_INSERT_FIELD16(_min, _max, _field, EFX_MASK16(_field)) #define EFX_INPLACE_MASK8(_min, _max, _field) \ EFX_INSERT_FIELD8(_min, _max, _field, EFX_MASK8(_field)) #define EFX_SET_OWORD_FIELD64(_oword, _field, _value) \ do { \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u64[0] = (((_oword).eo_u64[0] & \ ~EFX_INPLACE_MASK64(0, 63, _field)) | \ EFX_INSERT_FIELD64(0, 63, _field, _value)); \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u64[1] = (((_oword).eo_u64[1] & \ ~EFX_INPLACE_MASK64(64, 127, _field)) | \ EFX_INSERT_FIELD64(64, 127, _field, _value)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_SET_OWORD_FIELD32(_oword, _field, _value) \ do { \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u32[0] = (((_oword).eo_u32[0] & \ ~EFX_INPLACE_MASK32(0, 31, _field)) | \ EFX_INSERT_FIELD32(0, 31, _field, _value)); \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u32[1] = (((_oword).eo_u32[1] & \ ~EFX_INPLACE_MASK32(32, 63, _field)) | \ EFX_INSERT_FIELD32(32, 63, _field, _value)); \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u32[2] = (((_oword).eo_u32[2] & \ ~EFX_INPLACE_MASK32(64, 95, _field)) | \ EFX_INSERT_FIELD32(64, 95, _field, _value)); \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u32[3] = (((_oword).eo_u32[3] & \ ~EFX_INPLACE_MASK32(96, 127, _field)) | \ EFX_INSERT_FIELD32(96, 127, _field, _value)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_SET_QWORD_FIELD64(_qword, _field, _value) \ do { \ _NOTE(CONSTANTCONDITION) \ (_qword).eq_u64[0] = (((_qword).eq_u64[0] & \ ~EFX_INPLACE_MASK64(0, 63, _field)) | \ EFX_INSERT_FIELD64(0, 
63, _field, _value)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_SET_QWORD_FIELD32(_qword, _field, _value) \ do { \ _NOTE(CONSTANTCONDITION) \ (_qword).eq_u32[0] = (((_qword).eq_u32[0] & \ ~EFX_INPLACE_MASK32(0, 31, _field)) | \ EFX_INSERT_FIELD32(0, 31, _field, _value)); \ _NOTE(CONSTANTCONDITION) \ (_qword).eq_u32[1] = (((_qword).eq_u32[1] & \ ~EFX_INPLACE_MASK32(32, 63, _field)) | \ EFX_INSERT_FIELD32(32, 63, _field, _value)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_SET_DWORD_FIELD(_dword, _field, _value) \ do { \ _NOTE(CONSTANTCONDITION) \ (_dword).ed_u32[0] = (((_dword).ed_u32[0] & \ ~EFX_INPLACE_MASK32(0, 31, _field)) | \ EFX_INSERT_FIELD32(0, 31, _field, _value)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_SET_WORD_FIELD(_word, _field, _value) \ do { \ _NOTE(CONSTANTCONDITION) \ (_word).ew_u16[0] = (((_word).ew_u16[0] & \ ~EFX_INPLACE_MASK16(0, 15, _field)) | \ EFX_INSERT_FIELD16(0, 15, _field, _value)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_SET_BYTE_FIELD(_byte, _field, _value) \ do { \ _NOTE(CONSTANTCONDITION) \ (_byte).eb_u8[0] = (((_byte).eb_u8[0] & \ ~EFX_INPLACE_MASK8(0, 7, _field)) | \ EFX_INSERT_FIELD8(0, 7, _field, _value)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* * Set or clear a numbered bit within an octword. */ #define EFX_SHIFT64(_bit, _base) \ (((_bit) >= (_base) && (_bit) < (_base) + 64) ? \ ((uint64_t)1 << ((_bit) - (_base))) : \ 0U) #define EFX_SHIFT32(_bit, _base) \ (((_bit) >= (_base) && (_bit) < (_base) + 32) ? \ ((uint32_t)1 << ((_bit) - (_base))) : \ 0U) #define EFX_SHIFT16(_bit, _base) \ (((_bit) >= (_base) && (_bit) < (_base) + 16) ? \ (uint16_t)(1 << ((_bit) - (_base))) : \ 0U)
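/*
 * The EFX_SHIFTnn helpers evaluate to a shifted 1 only when _bit falls
 * inside the 64/32/16/8-bit lane that starts at _base, and to 0U
 * otherwise. This is what lets the multi-lane SET/CLEAR/TEST bit macros
 * further below update every lane unconditionally: for any given bit
 * number, all but one of the per-lane masks fold to zero at compile
 * time. A minimal standalone sketch of the idea (illustrative only,
 * not part of this header):
 *
 *	uint32_t lanes[4] = { 0, 0, 0, 0 };
 *
 *	lanes[0] |= EFX_SHIFT32(70, 0);		// 0U
 *	lanes[1] |= EFX_SHIFT32(70, 32);	// 0U
 *	lanes[2] |= EFX_SHIFT32(70, 64);	// (uint32_t)1 << 6 == 0x40
 *	lanes[3] |= EFX_SHIFT32(70, 96);	// 0U
 *
 * i.e. setting bit 70 of a 128-bit quantity touches only the third
 * 32-bit lane; the other three or-operations are no-ops. (The real
 * macros additionally convert each lane mask to little-endian.)
 */
#define EFX_SHIFT8(_bit, _base) \ (((_bit) >= (_base) && (_bit) < (_base) + 8) ? 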
\ (uint8_t)(1 << ((_bit) - (_base))) : \ 0U) #define EFX_SET_OWORD_BIT64(_oword, _bit) \ do { \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u64[0] |= \ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \ (_oword).eo_u64[1] |= \ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64))); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_SET_OWORD_BIT32(_oword, _bit) \ do { \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u32[0] |= \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \ (_oword).eo_u32[1] |= \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \ (_oword).eo_u32[2] |= \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64))); \ (_oword).eo_u32[3] |= \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96))); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_CLEAR_OWORD_BIT64(_oword, _bit) \ do { \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u64[0] &= \ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \ (_oword).eo_u64[1] &= \ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(64))); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_CLEAR_OWORD_BIT32(_oword, _bit) \ do { \ _NOTE(CONSTANTCONDITION) \ (_oword).eo_u32[0] &= \ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \ (_oword).eo_u32[1] &= \ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \ (_oword).eo_u32[2] &= \ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(64))); \ (_oword).eo_u32[3] &= \ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(96))); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_TEST_OWORD_BIT64(_oword, _bit) \ (((_oword).eo_u64[0] & \ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0)))) || \ ((_oword).eo_u64[1] & \ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64))))) #define EFX_TEST_OWORD_BIT32(_oword, _bit) \ (((_oword).eo_u32[0] & \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) || \ ((_oword).eo_u32[1] & \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32)))) || \ ((_oword).eo_u32[2] & \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64)))) || \ ((_oword).eo_u32[3] & \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96))))) #define EFX_SET_QWORD_BIT64(_qword, _bit) \ do { \ _NOTE(CONSTANTCONDITION) \ (_qword).eq_u64[0] |= \ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_SET_QWORD_BIT32(_qword, _bit) \ do { \ _NOTE(CONSTANTCONDITION) \ (_qword).eq_u32[0] |= \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \ (_qword).eq_u32[1] |= \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_CLEAR_QWORD_BIT64(_qword, _bit) \ do { \ _NOTE(CONSTANTCONDITION) \ (_qword).eq_u64[0] &= \ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_CLEAR_QWORD_BIT32(_qword, _bit) \ do { \ _NOTE(CONSTANTCONDITION) \ (_qword).eq_u32[0] &= \ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \ (_qword).eq_u32[1] &= \ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_TEST_QWORD_BIT64(_qword, _bit) \ (((_qword).eq_u64[0] & \ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0)))) != 0) #define EFX_TEST_QWORD_BIT32(_qword, _bit) \ (((_qword).eq_u32[0] & \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) || \ ((_qword).eq_u32[1] & \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))))) #define EFX_SET_DWORD_BIT(_dword, _bit) \ do { \ (_dword).ed_u32[0] |= \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_CLEAR_DWORD_BIT(_dword, _bit) \ do { \ (_dword).ed_u32[0] &= \ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \ 
_NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_TEST_DWORD_BIT(_dword, _bit) \ (((_dword).ed_u32[0] & \ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) != 0) #define EFX_SET_WORD_BIT(_word, _bit) \ do { \ (_word).ew_u16[0] |= \ __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0))); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_CLEAR_WORD_BIT(_word, _bit) \ do { \ (_word).ew_u16[0] &= \ __CPU_TO_LE_16(~EFX_SHIFT16(_bit, FIX_LINT(0))); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_TEST_WORD_BIT(_word, _bit) \ (((_word).ew_u16[0] & \ __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0)))) != 0) #define EFX_SET_BYTE_BIT(_byte, _bit) \ do { \ (_byte).eb_u8[0] |= \ __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0))); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_CLEAR_BYTE_BIT(_byte, _bit) \ do { \ (_byte).eb_u8[0] &= \ __NATIVE_8(~EFX_SHIFT8(_bit, FIX_LINT(0))); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_TEST_BYTE_BIT(_byte, _bit) \ (((_byte).eb_u8[0] & \ __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0)))) != 0) #define EFX_OR_OWORD64(_oword1, _oword2) \ do { \ (_oword1).eo_u64[0] |= (_oword2).eo_u64[0]; \ (_oword1).eo_u64[1] |= (_oword2).eo_u64[1]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_OR_OWORD32(_oword1, _oword2) \ do { \ (_oword1).eo_u32[0] |= (_oword2).eo_u32[0]; \ (_oword1).eo_u32[1] |= (_oword2).eo_u32[1]; \ (_oword1).eo_u32[2] |= (_oword2).eo_u32[2]; \ (_oword1).eo_u32[3] |= (_oword2).eo_u32[3]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_AND_OWORD64(_oword1, _oword2) \ do { \ (_oword1).eo_u64[0] &= (_oword2).eo_u64[0]; \ (_oword1).eo_u64[1] &= (_oword2).eo_u64[1]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_AND_OWORD32(_oword1, _oword2) \ do { \ (_oword1).eo_u32[0] &= (_oword2).eo_u32[0]; \ (_oword1).eo_u32[1] &= (_oword2).eo_u32[1]; \ (_oword1).eo_u32[2] &= (_oword2).eo_u32[2]; \ (_oword1).eo_u32[3] &= (_oword2).eo_u32[3]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_OR_QWORD64(_qword1, _qword2) \ do { \ (_qword1).eq_u64[0] |= (_qword2).eq_u64[0]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_OR_QWORD32(_qword1, _qword2) \ do { \ (_qword1).eq_u32[0] |= (_qword2).eq_u32[0]; \ (_qword1).eq_u32[1] |= (_qword2).eq_u32[1]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_AND_QWORD64(_qword1, _qword2) \ do { \ (_qword1).eq_u64[0] &= (_qword2).eq_u64[0]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_AND_QWORD32(_qword1, _qword2) \ do { \ (_qword1).eq_u32[0] &= (_qword2).eq_u32[0]; \ (_qword1).eq_u32[1] &= (_qword2).eq_u32[1]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_OR_DWORD(_dword1, _dword2) \ do { \ (_dword1).ed_u32[0] |= (_dword2).ed_u32[0]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_AND_DWORD(_dword1, _dword2) \ do { \ (_dword1).ed_u32[0] &= (_dword2).ed_u32[0]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_OR_WORD(_word1, _word2) \ do { \ (_word1).ew_u16[0] |= (_word2).ew_u16[0]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_AND_WORD(_word1, _word2) \ do { \ (_word1).ew_u16[0] &= (_word2).ew_u16[0]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_OR_BYTE(_byte1, _byte2) \ do { \ (_byte1).eb_u8[0] |= (_byte2).eb_u8[0]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_AND_BYTE(_byte1, _byte2) \ do { \ (_byte1).eb_u8[0] &= (_byte2).eb_u8[0]; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE)
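/*
 * Usage sketch for the populate and read-modify-write macros above.
 * Field tokens are assumed to follow the <field>_LBN/<field>_WIDTH
 * convention used throughout this header; HYPO_ENABLE and HYPO_LABEL
 * below are hypothetical names, for illustration only:
 *
 *	#define HYPO_ENABLE_LBN 0
 *	#define HYPO_ENABLE_WIDTH 1
 *	#define HYPO_LABEL_LBN 1
 *	#define HYPO_LABEL_WIDTH 10
 *
 *	efx_dword_t reg;
 *
 *	EFX_POPULATE_DWORD_2(reg, HYPO_ENABLE, 1, HYPO_LABEL, 42);
 *	EFX_SET_DWORD_FIELD(reg, HYPO_LABEL, 7);	// RMW of one field
 *	if (EFX_TEST_DWORD_BIT(reg, 0))			// the HYPO_ENABLE bit
 *		...
 *
 * EFX_POPULATE_DWORD_2 pads the remaining eight argument slots with
 * EFX_DUMMY_FIELD, so the ten-pair expansion still collapses to a single
 * 32-bit store with all unnamed bits zeroed.
 */
#if EFSYS_USE_UINT64 #define EFX_OWORD_FIELD EFX_OWORD_FIELD64 #define EFX_QWORD_FIELD 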
EFX_QWORD_FIELD64 #define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL64 #define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL64 #define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64 #define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64 #define EFX_OWORD_IS_SET EFX_OWORD_IS_SET64 #define EFX_QWORD_IS_SET EFX_QWORD_IS_SET64 #define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64 #define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64 #define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64 #define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64 #define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT64 #define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT64 #define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT64 #define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT64 #define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT64 #define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT64 #define EFX_OR_OWORD EFX_OR_OWORD64 #define EFX_AND_OWORD EFX_AND_OWORD64 #define EFX_OR_QWORD EFX_OR_QWORD64 #define EFX_AND_QWORD EFX_AND_QWORD64 #else #define EFX_OWORD_FIELD EFX_OWORD_FIELD32 #define EFX_QWORD_FIELD EFX_QWORD_FIELD32 #define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL32 #define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL32 #define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32 #define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32 #define EFX_OWORD_IS_SET EFX_OWORD_IS_SET32 #define EFX_QWORD_IS_SET EFX_QWORD_IS_SET32 #define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32 #define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32 #define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32 #define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 #define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT32 #define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT32 #define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT32 #define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT32 #define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT32 #define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT32 #define EFX_OR_OWORD EFX_OR_OWORD32 #define EFX_AND_OWORD EFX_AND_OWORD32 #define EFX_OR_QWORD EFX_OR_QWORD32 #define EFX_AND_QWORD EFX_AND_QWORD32 #endif #ifdef __cplusplus } #endif #endif /* _SYS_EFX_TYPES_H */ Index: projects/clang700-import/sys/dev/sfxge/common/efx_vpd.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/efx_vpd.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/efx_vpd.c (revision 340918) @@ -1,1021 +1,1021 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_VPD #define TAG_TYPE_LBN 7 #define TAG_TYPE_WIDTH 1 #define TAG_TYPE_LARGE_ITEM_DECODE 1 #define TAG_TYPE_SMALL_ITEM_DECODE 0 #define TAG_SMALL_ITEM_NAME_LBN 3 #define TAG_SMALL_ITEM_NAME_WIDTH 4 #define TAG_SMALL_ITEM_SIZE_LBN 0 #define TAG_SMALL_ITEM_SIZE_WIDTH 3 #define TAG_LARGE_ITEM_NAME_LBN 0 #define TAG_LARGE_ITEM_NAME_WIDTH 7 #define TAG_NAME_END_DECODE 0x0f #define TAG_NAME_ID_STRING_DECODE 0x02 #define TAG_NAME_VPD_R_DECODE 0x10 #define TAG_NAME_VPD_W_DECODE 0x11 #if EFSYS_OPT_SIENA static const efx_vpd_ops_t __efx_vpd_siena_ops = { siena_vpd_init, /* evpdo_init */ siena_vpd_size, /* evpdo_size */ siena_vpd_read, /* evpdo_read */ siena_vpd_verify, /* evpdo_verify */ siena_vpd_reinit, /* evpdo_reinit */ siena_vpd_get, /* evpdo_get */ siena_vpd_set, /* evpdo_set */ siena_vpd_next, /* evpdo_next */ siena_vpd_write, /* evpdo_write */ siena_vpd_fini, /* evpdo_fini */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD static const efx_vpd_ops_t __efx_vpd_ef10_ops = { ef10_vpd_init, /* evpdo_init */ ef10_vpd_size, /* evpdo_size */ ef10_vpd_read, /* evpdo_read */ ef10_vpd_verify, /* evpdo_verify */ ef10_vpd_reinit, /* evpdo_reinit */ ef10_vpd_get, /* evpdo_get */ ef10_vpd_set, /* evpdo_set */ ef10_vpd_next, /* evpdo_next */ ef10_vpd_write, /* evpdo_write */ ef10_vpd_fini, /* evpdo_fini */ }; #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_vpd_init( __in efx_nic_t *enp) { const efx_vpd_ops_t *evpdop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_VPD)); switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: evpdop = &__efx_vpd_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: evpdop = &__efx_vpd_ef10_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: evpdop = &__efx_vpd_ef10_ops; break; #endif /* EFSYS_OPT_MEDFORD */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail1; } if (evpdop->evpdo_init != NULL) { if ((rc = evpdop->evpdo_init(enp)) != 0) goto fail2; } enp->en_evpdop = evpdop; enp->en_mod_flags |= EFX_MOD_VPD; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_vpd_size( __in efx_nic_t *enp, __out size_t *sizep) { const efx_vpd_ops_t *evpdop = enp->en_evpdop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_size(enp, sizep)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t 
efx_vpd_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size) { const efx_vpd_ops_t *evpdop = enp->en_evpdop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_read(enp, data, size)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_vpd_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { const efx_vpd_ops_t *evpdop = enp->en_evpdop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_verify(enp, data, size)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_vpd_reinit( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { const efx_vpd_ops_t *evpdop = enp->en_evpdop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if (evpdop->evpdo_reinit == NULL) { rc = ENOTSUP; goto fail1; } if ((rc = evpdop->evpdo_reinit(enp, data, size)) != 0) goto fail2; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_vpd_get( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __inout efx_vpd_value_t *evvp) { const efx_vpd_ops_t *evpdop = enp->en_evpdop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_get(enp, data, size, evvp)) != 0) { if (rc == ENOENT) return (rc); goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_vpd_set( __in efx_nic_t *enp, __inout_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp) { const efx_vpd_ops_t *evpdop = enp->en_evpdop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_set(enp, data, size, evvp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_vpd_next( __in efx_nic_t *enp, __inout_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_value_t *evvp, __inout unsigned int *contp) { const efx_vpd_ops_t *evpdop = enp->en_evpdop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_next(enp, data, size, evvp, contp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_vpd_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size) { const efx_vpd_ops_t *evpdop = enp->en_evpdop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if ((rc = evpdop->evpdo_write(enp, data, size)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_vpd_next_tag( __in caddr_t data, __in size_t size, __inout unsigned int *offsetp, __out efx_vpd_tag_t *tagp, __out uint16_t *lengthp) { efx_byte_t byte; efx_word_t word; uint8_t name; uint16_t length; size_t headlen; efx_rc_t rc; if (*offsetp >= size) { rc = EFAULT; goto fail1; } EFX_POPULATE_BYTE_1(byte, EFX_BYTE_0, data[*offsetp]); switch (EFX_BYTE_FIELD(byte, TAG_TYPE)) { case TAG_TYPE_SMALL_ITEM_DECODE: headlen = 1; name = 
EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_NAME); length = (uint16_t)EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_SIZE); break; case TAG_TYPE_LARGE_ITEM_DECODE: headlen = 3; if (*offsetp + headlen > size) { rc = EFAULT; goto fail2; } name = EFX_BYTE_FIELD(byte, TAG_LARGE_ITEM_NAME); EFX_POPULATE_WORD_2(word, EFX_BYTE_0, data[*offsetp + 1], EFX_BYTE_1, data[*offsetp + 2]); length = EFX_WORD_FIELD(word, EFX_WORD_0); break; default: rc = EFAULT; goto fail2; } if (*offsetp + headlen + length > size) { rc = EFAULT; goto fail3; } EFX_STATIC_ASSERT(TAG_NAME_END_DECODE == EFX_VPD_END); EFX_STATIC_ASSERT(TAG_NAME_ID_STRING_DECODE == EFX_VPD_ID); EFX_STATIC_ASSERT(TAG_NAME_VPD_R_DECODE == EFX_VPD_RO); EFX_STATIC_ASSERT(TAG_NAME_VPD_W_DECODE == EFX_VPD_RW); if (name != EFX_VPD_END && name != EFX_VPD_ID && name != EFX_VPD_RO) { rc = EFAULT; goto fail4; } *tagp = name; *lengthp = length; *offsetp += headlen; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_vpd_next_keyword( __in_bcount(size) caddr_t tag, __in size_t size, __in unsigned int pos, __out efx_vpd_keyword_t *keywordp, __out uint8_t *lengthp) { efx_vpd_keyword_t keyword; uint8_t length; efx_rc_t rc; if (pos + 3U > size) { rc = EFAULT; goto fail1; } keyword = EFX_VPD_KEYWORD(tag[pos], tag[pos + 1]); length = tag[pos + 2]; if (length == 0 || pos + 3U + length > size) { rc = EFAULT; goto fail2; } *keywordp = keyword; *lengthp = length; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_vpd_hunk_length( __in_bcount(size) caddr_t data, __in size_t size, __out size_t *lengthp) { efx_vpd_tag_t tag; unsigned int offset; uint16_t taglen; efx_rc_t rc; offset = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_next_tag(data, size, &offset, &tag, &taglen)) != 0) goto fail1; offset += taglen; if (tag == EFX_VPD_END) break; } *lengthp = offset; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_vpd_hunk_verify( __in_bcount(size) caddr_t data, __in size_t size, __out_opt boolean_t *cksummedp) { efx_vpd_tag_t tag; efx_vpd_keyword_t keyword; unsigned int offset; unsigned int pos; unsigned int i; uint16_t taglen; uint8_t keylen; uint8_t cksum; boolean_t cksummed = B_FALSE; efx_rc_t rc; /* * Parse every tag and keyword in the existing VPD. If the csum is * present, then assert that it is correct and that it is the final * keyword in the RO block.
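 *
 * Tags follow the PCI VPD resource encoding parsed by efx_vpd_next_tag
 * above: a large resource header is three bytes (type/name byte, then a
 * little-endian 16-bit length), so 0x82 introduces the ID string and
 * 0x90 the VPD-R block, while a small resource header is one byte with
 * the name in bits 6:3 and the size in bits 2:0 (END is 0x78, i.e.
 * name 0x0f, size 0).
 *
 * Checksum invariant (illustrative): if the RV keyword of the RO block
 * starts at absolute offset (offset + pos), then bytes 0 through
 * (offset + pos + 3) inclusive cover everything up to and including the
 * one-byte RV payload, and a well-formed image satisfies
 *
 *	(uint8_t)(data[0] + data[1] + ... + data[offset + pos + 3]) == 0
 *
 * which is exactly what the summation loop below checks.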
*/ offset = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_next_tag(data, size, &offset, &tag, &taglen)) != 0) goto fail1; if (tag == EFX_VPD_END) break; else if (tag == EFX_VPD_ID) goto done; for (pos = 0; pos != taglen; pos += 3 + keylen) { /* RV keyword must be the last in the block */ if (cksummed) { rc = EFAULT; goto fail2; } if ((rc = efx_vpd_next_keyword(data + offset, taglen, pos, &keyword, &keylen)) != 0) goto fail3; if (keyword == EFX_VPD_KEYWORD('R', 'V')) { cksum = 0; for (i = 0; i < offset + pos + 4; i++) cksum += data[i]; if (cksum != 0) { rc = EFAULT; goto fail4; } cksummed = B_TRUE; } } done: offset += taglen; } if (!cksummed) { rc = EFAULT; goto fail5; } if (cksummedp != NULL) *cksummedp = cksummed; return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static uint8_t __efx_vpd_blank_pid[] = { /* Large resource type ID length 1 */ 0x82, 0x01, 0x00, /* Product name ' ' */ 0x32, }; static uint8_t __efx_vpd_blank_r[] = { /* Large resource type VPD-R length 4 */ 0x90, 0x04, 0x00, /* RV keyword length 1 */ 'R', 'V', 0x01, /* RV payload checksum */ 0x00, }; __checkReturn efx_rc_t efx_vpd_hunk_reinit( __in_bcount(size) caddr_t data, __in size_t size, __in boolean_t wantpid) { unsigned int offset = 0; unsigned int pos; efx_byte_t byte; uint8_t cksum; efx_rc_t rc; if (size < 0x100) { rc = ENOSPC; goto fail1; } if (wantpid) { memcpy(data + offset, __efx_vpd_blank_pid, sizeof (__efx_vpd_blank_pid)); offset += sizeof (__efx_vpd_blank_pid); } memcpy(data + offset, __efx_vpd_blank_r, sizeof (__efx_vpd_blank_r)); offset += sizeof (__efx_vpd_blank_r); /* Update checksum */ cksum = 0; for (pos = 0; pos < offset; pos++) cksum += data[pos]; data[offset - 1] -= cksum; /* Append trailing tag */ EFX_POPULATE_BYTE_3(byte, TAG_TYPE, TAG_TYPE_SMALL_ITEM_DECODE, TAG_SMALL_ITEM_NAME, TAG_NAME_END_DECODE, TAG_SMALL_ITEM_SIZE, 0); data[offset] = EFX_BYTE_FIELD(byte, EFX_BYTE_0); offset++; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_vpd_hunk_next( __in_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_tag_t *tagp, __out efx_vpd_keyword_t *keywordp, __out_opt unsigned int *payloadp, __out_opt uint8_t *paylenp, __inout unsigned int *contp) { efx_vpd_tag_t tag; efx_vpd_keyword_t keyword = 0; unsigned int offset; unsigned int pos; unsigned int index; uint16_t taglen; uint8_t keylen; uint8_t paylen; efx_rc_t rc; offset = index = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_next_tag(data, size, &offset, &tag, &taglen)) != 0) goto fail1; if (tag == EFX_VPD_END) { keyword = 0; paylen = 0; index = 0; break; } if (tag == EFX_VPD_ID) { if (index++ == *contp) { EFSYS_ASSERT3U(taglen, <, 0x100); keyword = 0; paylen = (uint8_t)MIN(taglen, 0xff); goto done; } } else { for (pos = 0; pos != taglen; pos += 3 + keylen) { if ((rc = efx_vpd_next_keyword(data + offset, taglen, pos, &keyword, &keylen)) != 0) goto fail2; if (index++ == *contp) { offset += pos + 3; paylen = keylen; goto done; } } } offset += taglen; } done: *tagp = tag; *keywordp = keyword; if (payloadp != NULL) *payloadp = offset; if (paylenp != NULL) *paylenp = paylen; *contp = index; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_vpd_hunk_get( __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_tag_t tag, __in efx_vpd_keyword_t keyword, __out unsigned int *payloadp, 
__out uint8_t *paylenp) { efx_vpd_tag_t itag; efx_vpd_keyword_t ikeyword; unsigned int offset; unsigned int pos; uint16_t taglen; uint8_t keylen; efx_rc_t rc; offset = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_next_tag(data, size, &offset, &itag, &taglen)) != 0) goto fail1; if (itag == EFX_VPD_END) break; if (itag == tag) { if (itag == EFX_VPD_ID) { EFSYS_ASSERT3U(taglen, <, 0x100); *paylenp = (uint8_t)MIN(taglen, 0xff); *payloadp = offset; return (0); } for (pos = 0; pos != taglen; pos += 3 + keylen) { if ((rc = efx_vpd_next_keyword(data + offset, taglen, pos, &ikeyword, &keylen)) != 0) goto fail2; if (ikeyword == keyword) { *paylenp = keylen; *payloadp = offset + pos + 3; return (0); } } } offset += taglen; } /* Not an error */ return (ENOENT); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_vpd_hunk_set( __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp) { efx_word_t word; efx_vpd_tag_t tag; efx_vpd_keyword_t keyword; unsigned int offset; unsigned int pos; unsigned int taghead; unsigned int source; unsigned int dest; unsigned int i; uint16_t taglen; uint8_t keylen; uint8_t cksum; size_t used; efx_rc_t rc; switch (evvp->evv_tag) { case EFX_VPD_ID: if (evvp->evv_keyword != 0) { rc = EINVAL; goto fail1; } /* Can't delete the ID keyword */ if (evvp->evv_length == 0) { rc = EINVAL; goto fail1; } break; case EFX_VPD_RO: if (evvp->evv_keyword == EFX_VPD_KEYWORD('R', 'V')) { rc = EINVAL; goto fail1; } break; default: rc = EINVAL; goto fail1; } /* Determine total size of all current tags */ if ((rc = efx_vpd_hunk_length(data, size, &used)) != 0) goto fail2; offset = 0; _NOTE(CONSTANTCONDITION) while (1) { taghead = offset; if ((rc = efx_vpd_next_tag(data, size, &offset, &tag, &taglen)) != 0) goto fail3; if (tag == EFX_VPD_END) break; else if (tag != evvp->evv_tag) { offset += taglen; continue; } /* We only support modifying large resource tags */ if (offset - taghead != 3) { rc = EINVAL; goto fail4; } /* * Work out the offset of the byte immediately after the * old (=source) and new (=dest) keyword/tag */ pos = 0; if (tag == EFX_VPD_ID) { source = offset + taglen; dest = offset + evvp->evv_length; goto check_space; } EFSYS_ASSERT3U(tag, ==, EFX_VPD_RO); source = dest = 0; for (pos = 0; pos != taglen; pos += 3 + keylen) { if ((rc = efx_vpd_next_keyword(data + offset, taglen, pos, &keyword, &keylen)) != 0) goto fail5; if (keyword == evvp->evv_keyword && evvp->evv_length == 0) { /* Deleting this keyword */ source = offset + pos + 3 + keylen; dest = offset + pos; break; } else if (keyword == evvp->evv_keyword) { /* Adjusting this keyword */ source = offset + pos + 3 + keylen; dest = offset + pos + 3 + evvp->evv_length; break; } else if (keyword == EFX_VPD_KEYWORD('R', 'V')) { /* The RV keyword must be at the end */ EFSYS_ASSERT3U(pos + 3 + keylen, ==, taglen); /* * The keyword doesn't already exist. If the * user is deleting a non-existent keyword then * this is a no-op.
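 *
 * Worked example (illustrative): inserting a new keyword with a
 * 6-byte payload at this point means the RV keyword currently at
 * tag-relative offset pos must move up by one 3-byte header plus
 * 6 payload bytes; hence source = offset + pos and
 * dest = offset + pos + 3 + evv_length, exactly as computed below.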
*/ if (evvp->evv_length == 0) return (0); /* Insert this keyword before the RV keyword */ source = offset + pos; dest = offset + pos + 3 + evvp->evv_length; break; } } check_space: if (used + dest > size + source) { rc = ENOSPC; goto fail6; } /* Move trailing data */ (void) memmove(data + dest, data + source, used - source); /* Copy contents */ memcpy(data + dest - evvp->evv_length, evvp->evv_value, evvp->evv_length); /* Insert new keyword header if required */ if (tag != EFX_VPD_ID && evvp->evv_length > 0) { EFX_POPULATE_WORD_1(word, EFX_WORD_0, evvp->evv_keyword); data[offset + pos + 0] = EFX_WORD_FIELD(word, EFX_BYTE_0); data[offset + pos + 1] = EFX_WORD_FIELD(word, EFX_BYTE_1); data[offset + pos + 2] = evvp->evv_length; } /* Modify tag length (large resource type) */ - taglen += (dest - source); + taglen += (uint16_t)(dest - source); EFX_POPULATE_WORD_1(word, EFX_WORD_0, taglen); data[offset - 2] = EFX_WORD_FIELD(word, EFX_BYTE_0); data[offset - 1] = EFX_WORD_FIELD(word, EFX_BYTE_1); goto checksum; } /* Unable to find the matching tag */ rc = ENOENT; goto fail7; checksum: /* Find the RV tag, and update the checksum */ offset = 0; _NOTE(CONSTANTCONDITION) while (1) { if ((rc = efx_vpd_next_tag(data, size, &offset, &tag, &taglen)) != 0) goto fail8; if (tag == EFX_VPD_END) break; if (tag == EFX_VPD_RO) { for (pos = 0; pos != taglen; pos += 3 + keylen) { if ((rc = efx_vpd_next_keyword(data + offset, taglen, pos, &keyword, &keylen)) != 0) goto fail9; if (keyword == EFX_VPD_KEYWORD('R', 'V')) { cksum = 0; for (i = 0; i < offset + pos + 3; i++) cksum += data[i]; data[i] = -cksum; break; } } } offset += taglen; } /* Zero out the unused portion */ (void) memset(data + offset + taglen, 0xff, size - offset - taglen); return (0); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_vpd_fini( __in efx_nic_t *enp) { const efx_vpd_ops_t *evpdop = enp->en_evpdop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD); if (evpdop->evpdo_fini != NULL) evpdop->evpdo_fini(enp); enp->en_evpdop = NULL; enp->en_mod_flags &= ~EFX_MOD_VPD; } #endif /* EFSYS_OPT_VPD */ Index: projects/clang700-import/sys/dev/sfxge/common/mcdi_mon.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/mcdi_mon.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/mcdi_mon.c (revision 340918) @@ -1,568 +1,578 @@ /*- * Copyright (c) 2009-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_MON_MCDI #if EFSYS_OPT_MON_STATS #define MCDI_MON_NEXT_PAGE ((uint16_t)0xfffe) #define MCDI_MON_INVALID_SENSOR ((uint16_t)0xfffd) #define MCDI_MON_PAGE_SIZE 0x20 /* Bitmasks of valid port(s) for each sensor */ #define MCDI_MON_PORT_NONE (0x00) #define MCDI_MON_PORT_P1 (0x01) #define MCDI_MON_PORT_P2 (0x02) #define MCDI_MON_PORT_P3 (0x04) #define MCDI_MON_PORT_P4 (0x08) #define MCDI_MON_PORT_Px (0xFFFF) /* Get port mask from one-based MCDI port number */ #define MCDI_MON_PORT_MASK(_emip) (1U << ((_emip)->emi_port - 1)) /* Entry for MCDI sensor in sensor map */ #define STAT(portmask, stat) \ { (MCDI_MON_PORT_##portmask), (EFX_MON_STAT_##stat) } /* Entry for sensor next page flag in sensor map */ #define STAT_NEXT_PAGE() \ { MCDI_MON_PORT_NONE, MCDI_MON_NEXT_PAGE } /* Placeholder for gaps in the array */ #define STAT_NO_SENSOR() \ { MCDI_MON_PORT_NONE, MCDI_MON_INVALID_SENSOR } /* Map from MC sensors to monitor statistics */ static const struct mcdi_sensor_map_s { uint16_t msm_port_mask; uint16_t msm_stat; } mcdi_sensor_map[] = { /* Sensor page 0 MC_CMD_SENSOR_xxx */ STAT(Px, INT_TEMP), /* 0x00 CONTROLLER_TEMP */ STAT(Px, EXT_TEMP), /* 0x01 PHY_COMMON_TEMP */ STAT(Px, INT_COOLING), /* 0x02 CONTROLLER_COOLING */ STAT(P1, EXT_TEMP), /* 0x03 PHY0_TEMP */ STAT(P1, EXT_COOLING), /* 0x04 PHY0_COOLING */ STAT(P2, EXT_TEMP), /* 0x05 PHY1_TEMP */ STAT(P2, EXT_COOLING), /* 0x06 PHY1_COOLING */ STAT(Px, 1V), /* 0x07 IN_1V0 */ STAT(Px, 1_2V), /* 0x08 IN_1V2 */ STAT(Px, 1_8V), /* 0x09 IN_1V8 */ STAT(Px, 2_5V), /* 0x0a IN_2V5 */ STAT(Px, 3_3V), /* 0x0b IN_3V3 */ STAT(Px, 12V), /* 0x0c IN_12V0 */ STAT(Px, 1_2VA), /* 0x0d IN_1V2A */ STAT(Px, VREF), /* 0x0e IN_VREF */ STAT(Px, VAOE), /* 0x0f OUT_VAOE */ STAT(Px, AOE_TEMP), /* 0x10 AOE_TEMP */ STAT(Px, PSU_AOE_TEMP), /* 0x11 PSU_AOE_TEMP */ STAT(Px, PSU_TEMP), /* 0x12 PSU_TEMP */ STAT(Px, FAN0), /* 0x13 FAN_0 */ STAT(Px, FAN1), /* 0x14 FAN_1 */ STAT(Px, FAN2), /* 0x15 FAN_2 */ STAT(Px, FAN3), /* 0x16 FAN_3 */ STAT(Px, FAN4), /* 0x17 FAN_4 */ STAT(Px, VAOE_IN), /* 0x18 IN_VAOE */ STAT(Px, IAOE), /* 0x19 OUT_IAOE */ STAT(Px, IAOE_IN), /* 0x1a IN_IAOE */ STAT(Px, NIC_POWER), /* 0x1b NIC_POWER */ STAT(Px, 0_9V), /* 0x1c IN_0V9 */ STAT(Px, I0_9V), /* 0x1d IN_I0V9 */ STAT(Px, I1_2V), /* 0x1e IN_I1V2 */ STAT_NEXT_PAGE(), /* 0x1f Next page flag (not a sensor) */ /* Sensor page 1 MC_CMD_SENSOR_xxx */ STAT(Px, 0_9V_ADC), /* 0x20 IN_0V9_ADC */ STAT(Px, INT_TEMP2), /* 0x21 CONTROLLER_2_TEMP */ STAT(Px, VREG_TEMP), /* 0x22 VREG_INTERNAL_TEMP */ STAT(Px, VREG_0_9V_TEMP), /* 0x23 VREG_0V9_TEMP */ STAT(Px, VREG_1_2V_TEMP), /* 0x24 VREG_1V2_TEMP */ STAT(Px, INT_VPTAT), /* 0x25 CTRLR. VPTAT */ STAT(Px, INT_ADC_TEMP), /* 0x26 CTRLR. 
INTERNAL_TEMP */ STAT(Px, EXT_VPTAT), /* 0x27 CTRLR. VPTAT_EXTADC */ STAT(Px, EXT_ADC_TEMP), /* 0x28 CTRLR. INTERNAL_TEMP_EXTADC */ STAT(Px, AMBIENT_TEMP), /* 0x29 AMBIENT_TEMP */ STAT(Px, AIRFLOW), /* 0x2a AIRFLOW */ STAT(Px, VDD08D_VSS08D_CSR), /* 0x2b VDD08D_VSS08D_CSR */ STAT(Px, VDD08D_VSS08D_CSR_EXTADC), /* 0x2c VDD08D_VSS08D_CSR_EXTADC */ STAT(Px, HOTPOINT_TEMP), /* 0x2d HOTPOINT_TEMP */ STAT(P1, PHY_POWER_SWITCH_PORT0), /* 0x2e PHY_POWER_SWITCH_PORT0 */ STAT(P2, PHY_POWER_SWITCH_PORT1), /* 0x2f PHY_POWER_SWITCH_PORT1 */ STAT(Px, MUM_VCC), /* 0x30 MUM_VCC */ STAT(Px, 0V9_A), /* 0x31 0V9_A */ STAT(Px, I0V9_A), /* 0x32 I0V9_A */ STAT(Px, 0V9_A_TEMP), /* 0x33 0V9_A_TEMP */ STAT(Px, 0V9_B), /* 0x34 0V9_B */ STAT(Px, I0V9_B), /* 0x35 I0V9_B */ STAT(Px, 0V9_B_TEMP), /* 0x36 0V9_B_TEMP */ STAT(Px, CCOM_AVREG_1V2_SUPPLY), /* 0x37 CCOM_AVREG_1V2_SUPPLY */ STAT(Px, CCOM_AVREG_1V2_SUPPLY_EXT_ADC), /* 0x38 CCOM_AVREG_1V2_SUPPLY_EXT_ADC */ STAT(Px, CCOM_AVREG_1V8_SUPPLY), /* 0x39 CCOM_AVREG_1V8_SUPPLY */ STAT(Px, CCOM_AVREG_1V8_SUPPLY_EXT_ADC), /* 0x3a CCOM_AVREG_1V8_SUPPLY_EXT_ADC */ STAT_NO_SENSOR(), /* 0x3b (no sensor) */ STAT_NO_SENSOR(), /* 0x3c (no sensor) */ STAT_NO_SENSOR(), /* 0x3d (no sensor) */ STAT_NO_SENSOR(), /* 0x3e (no sensor) */ STAT_NEXT_PAGE(), /* 0x3f Next page flag (not a sensor) */ /* Sensor page 2 MC_CMD_SENSOR_xxx */ STAT(Px, CONTROLLER_MASTER_VPTAT), /* 0x40 MASTER_VPTAT */ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP), /* 0x41 MASTER_INT_TEMP */ STAT(Px, CONTROLLER_MASTER_VPTAT_EXT_ADC), /* 0x42 MAST_VPTAT_EXT_ADC */ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC), /* 0x43 MASTER_INTERNAL_TEMP_EXT_ADC */ STAT(Px, CONTROLLER_SLAVE_VPTAT), /* 0x44 SLAVE_VPTAT */ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP), /* 0x45 SLAVE_INTERNAL_TEMP */ STAT(Px, CONTROLLER_SLAVE_VPTAT_EXT_ADC), /* 0x46 SLAVE_VPTAT_EXT_ADC */ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC), /* 0x47 SLAVE_INTERNAL_TEMP_EXT_ADC */ STAT_NO_SENSOR(), /* 0x48 (no sensor) */ STAT(Px, SODIMM_VOUT), /* 0x49 SODIMM_VOUT */ STAT(Px, SODIMM_0_TEMP), /* 0x4a SODIMM_0_TEMP */ STAT(Px, SODIMM_1_TEMP), /* 0x4b SODIMM_1_TEMP */ STAT(Px, PHY0_VCC), /* 0x4c PHY0_VCC */ STAT(Px, PHY1_VCC), /* 0x4d PHY1_VCC */ STAT(Px, CONTROLLER_TDIODE_TEMP), /* 0x4e CONTROLLER_TDIODE_TEMP */ STAT(Px, BOARD_FRONT_TEMP), /* 0x4f BOARD_FRONT_TEMP */ STAT(Px, BOARD_BACK_TEMP), /* 0x50 BOARD_BACK_TEMP */ + STAT(Px, I1V8), /* 0x51 IN_I1V8 */ + STAT(Px, I2V5), /* 0x52 IN_I2V5 */ }; #define MCDI_STATIC_SENSOR_ASSERT(_field) \ EFX_STATIC_ASSERT(MC_CMD_SENSOR_STATE_ ## _field \ == EFX_MON_STAT_STATE_ ## _field) static void mcdi_mon_decode_stats( __in efx_nic_t *enp, __in_bcount(sensor_mask_size) uint32_t *sensor_mask, __in size_t sensor_mask_size, __in_opt efsys_mem_t *esmp, __out_bcount_opt(sensor_mask_size) uint32_t *stat_maskp, __inout_ecount_opt(EFX_MON_NSTATS) efx_mon_stat_value_t *stat) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); uint16_t port_mask; uint16_t sensor; size_t sensor_max; uint32_t stat_mask[(EFX_ARRAY_SIZE(mcdi_sensor_map) + 31) / 32]; uint32_t idx = 0; uint32_t page = 0; /* Assert the MC_CMD_SENSOR and EFX_MON_STATE namespaces agree */ MCDI_STATIC_SENSOR_ASSERT(OK); MCDI_STATIC_SENSOR_ASSERT(WARNING); MCDI_STATIC_SENSOR_ASSERT(FATAL); MCDI_STATIC_SENSOR_ASSERT(BROKEN); MCDI_STATIC_SENSOR_ASSERT(NO_READING); EFX_STATIC_ASSERT(sizeof (stat_mask[0]) * 8 == EFX_MON_MASK_ELEMENT_SIZE); sensor_max = MIN((8 * sensor_mask_size), EFX_ARRAY_SIZE(mcdi_sensor_map)); EFSYS_ASSERT(emip->emi_port > 0); /* MCDI port number is one-based */ 
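	/*
	 * MCDI_MON_PORT_MASK maps the one-based MCDI port number into the
	 * msm_port_mask bit space used by the sensor map above: emi_port == 1
	 * yields 0x01 (MCDI_MON_PORT_P1), emi_port == 2 yields 0x02
	 * (MCDI_MON_PORT_P2), and so on; entries tagged MCDI_MON_PORT_Px
	 * (0xFFFF) therefore match any port.
	 */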
port_mask = MCDI_MON_PORT_MASK(emip); memset(stat_mask, 0, sizeof (stat_mask)); /* * The MCDI sensor readings in the DMA buffer are a packed array of * MC_CMD_SENSOR_VALUE_ENTRY structures, which only includes entries for * supported sensors (bit set in sensor_mask). The sensor_mask and * sensor readings do not include entries for the per-page NEXT_PAGE * flag. * * sensor_mask may legitimately contain MCDI sensors that the driver * does not understand. */ for (sensor = 0; sensor < sensor_max; ++sensor) { efx_mon_stat_t id = mcdi_sensor_map[sensor].msm_stat; if ((sensor % MCDI_MON_PAGE_SIZE) == MC_CMD_SENSOR_PAGE0_NEXT) { EFSYS_ASSERT3U(id, ==, MCDI_MON_NEXT_PAGE); page++; continue; } if (~(sensor_mask[page]) & (1U << sensor)) continue; idx++; if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0) continue; EFSYS_ASSERT(id < EFX_MON_NSTATS); /* * stat_mask is a bitmask indexed by EFX_MON_* monitor statistic * identifiers from efx_mon_stat_t (without NEXT_PAGE bits). * * If there is an entry in the MCDI sensor to monitor statistic * map then the sensor reading is used for the value of the * monitor statistic. */ stat_mask[id / EFX_MON_MASK_ELEMENT_SIZE] |= (1U << (id % EFX_MON_MASK_ELEMENT_SIZE)); if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) { efx_dword_t dword; /* Get MCDI sensor reading from DMA buffer */ EFSYS_MEM_READD(esmp, 4 * (idx - 1), &dword); /* Update EFX monitor stat from MCDI sensor reading */ stat[id].emsv_value = (uint16_t)EFX_DWORD_FIELD(dword, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE); stat[id].emsv_state = (uint16_t)EFX_DWORD_FIELD(dword, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE); } } if (stat_maskp != NULL) { memcpy(stat_maskp, stat_mask, sizeof (stat_mask)); } } __checkReturn efx_rc_t mcdi_mon_ev( __in efx_nic_t *enp, __in efx_qword_t *eqp, __out efx_mon_stat_t *idp, __out efx_mon_stat_value_t *valuep) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); - efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint16_t port_mask; uint16_t sensor; uint16_t state; uint16_t value; efx_mon_stat_t id; efx_rc_t rc; EFSYS_ASSERT(emip->emi_port > 0); /* MCDI port number is one-based */ port_mask = MCDI_MON_PORT_MASK(emip); sensor = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_MONITOR); state = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_STATE); value = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_VALUE); /* Hardware must support this MCDI sensor */ - EFSYS_ASSERT3U(sensor, <, (8 * encp->enc_mcdi_sensor_mask_size)); + EFSYS_ASSERT3U(sensor, <, + (8 * enp->en_nic_cfg.enc_mcdi_sensor_mask_size)); EFSYS_ASSERT((sensor % MCDI_MON_PAGE_SIZE) != MC_CMD_SENSOR_PAGE0_NEXT); - EFSYS_ASSERT(encp->enc_mcdi_sensor_maskp != NULL); - EFSYS_ASSERT((encp->enc_mcdi_sensor_maskp[sensor / MCDI_MON_PAGE_SIZE] & - (1U << (sensor % MCDI_MON_PAGE_SIZE))) != 0); + EFSYS_ASSERT(enp->en_nic_cfg.enc_mcdi_sensor_maskp != NULL); + EFSYS_ASSERT( + (enp->en_nic_cfg.enc_mcdi_sensor_maskp[sensor/MCDI_MON_PAGE_SIZE] & + (1U << (sensor % MCDI_MON_PAGE_SIZE))) != 0); /* But we don't have to understand it */ if (sensor >= EFX_ARRAY_SIZE(mcdi_sensor_map)) { rc = ENOTSUP; goto fail1; } id = mcdi_sensor_map[sensor].msm_stat; if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0) return (ENODEV); EFSYS_ASSERT(id < EFX_MON_NSTATS); *idp = id; valuep->emsv_value = value; valuep->emsv_state = state; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_read_sensors( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __in uint32_t size) { efx_mcdi_req_t req; uint8_t 
payload[MAX(MC_CMD_READ_SENSORS_EXT_IN_LEN, MC_CMD_READ_SENSORS_EXT_OUT_LEN)]; uint32_t addr_lo, addr_hi; req.emr_cmd = MC_CMD_READ_SENSORS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_READ_SENSORS_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_READ_SENSORS_EXT_OUT_LEN; addr_lo = (uint32_t)(EFSYS_MEM_ADDR(esmp) & 0xffffffff); addr_hi = (uint32_t)(EFSYS_MEM_ADDR(esmp) >> 32); MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_LO, addr_lo); MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_HI, addr_hi); MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_LENGTH, size); efx_mcdi_execute(enp, &req); return (req.emr_rc); } static __checkReturn efx_rc_t efx_mcdi_sensor_info_npages( __in efx_nic_t *enp, __out uint32_t *npagesp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN, MC_CMD_SENSOR_INFO_OUT_LENMAX)]; int page; efx_rc_t rc; EFSYS_ASSERT(npagesp != NULL); page = 0; do { (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SENSOR_INFO; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX; MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page++); efx_mcdi_execute_quiet(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } } while (MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK) & (1U << MC_CMD_SENSOR_PAGE0_NEXT)); *npagesp = page; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t efx_mcdi_sensor_info( __in efx_nic_t *enp, __out_ecount(npages) uint32_t *sensor_maskp, __in size_t npages) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN, MC_CMD_SENSOR_INFO_OUT_LENMAX)]; uint32_t page; efx_rc_t rc; EFSYS_ASSERT(sensor_maskp != NULL); + if (npages < 1) { + rc = EINVAL; + goto fail1; + } + for (page = 0; page < npages; page++) { uint32_t mask; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SENSOR_INFO; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX; MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; - goto fail1; + goto fail2; } mask = MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK); if ((page != (npages - 1)) && ((mask & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) == 0)) { rc = EINVAL; - goto fail2; + goto fail3; } sensor_maskp[page] = mask; } if (sensor_maskp[npages - 1] & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) { rc = EINVAL; - goto fail3; + goto fail4; } return (0); +fail4: + EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t mcdi_mon_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t size = encp->enc_mon_stat_dma_buf_size; efx_rc_t rc; if ((rc = efx_mcdi_read_sensors(enp, esmp, size)) != 0) goto fail1; EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, size); mcdi_mon_decode_stats(enp, encp->enc_mcdi_sensor_maskp, encp->enc_mcdi_sensor_mask_size, esmp, NULL, values); return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t mcdi_mon_cfg_build( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t npages; efx_rc_t rc; switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: encp->enc_mon_type = EFX_MON_SFC90X0; 
break; #endif #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: encp->enc_mon_type = EFX_MON_SFC91X0; break; #endif #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: encp->enc_mon_type = EFX_MON_SFC92X0; break; #endif default: rc = EINVAL; goto fail1; } /* Get mc sensor mask size */ npages = 0; if ((rc = efx_mcdi_sensor_info_npages(enp, &npages)) != 0) goto fail2; encp->enc_mon_stat_dma_buf_size = npages * EFX_MON_STATS_PAGE_SIZE; encp->enc_mcdi_sensor_mask_size = npages * sizeof (uint32_t); /* Allocate mc sensor mask */ EFSYS_KMEM_ALLOC(enp->en_esip, encp->enc_mcdi_sensor_mask_size, encp->enc_mcdi_sensor_maskp); if (encp->enc_mcdi_sensor_maskp == NULL) { rc = ENOMEM; goto fail3; } /* Read mc sensor mask */ if ((rc = efx_mcdi_sensor_info(enp, encp->enc_mcdi_sensor_maskp, npages)) != 0) goto fail4; /* Build monitor statistics mask */ mcdi_mon_decode_stats(enp, encp->enc_mcdi_sensor_maskp, encp->enc_mcdi_sensor_mask_size, NULL, encp->enc_mon_stat_mask, NULL); return (0); fail4: EFSYS_PROBE(fail4); EFSYS_KMEM_FREE(enp->en_esip, encp->enc_mcdi_sensor_mask_size, encp->enc_mcdi_sensor_maskp); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void mcdi_mon_cfg_free( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); if (encp->enc_mcdi_sensor_maskp != NULL) { EFSYS_KMEM_FREE(enp->en_esip, encp->enc_mcdi_sensor_mask_size, encp->enc_mcdi_sensor_maskp); } } #endif /* EFSYS_OPT_MON_STATS */ #endif /* EFSYS_OPT_MON_MCDI */ Index: projects/clang700-import/sys/dev/sfxge/common/medford_impl.h =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/medford_impl.h (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/medford_impl.h (revision 340918) @@ -1,69 +1,61 @@ /*- * Copyright (c) 2015-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
* * $FreeBSD$ */ #ifndef _SYS_MEDFORD_IMPL_H #define _SYS_MEDFORD_IMPL_H #ifdef __cplusplus extern "C" { #endif -/* Alignment requirement for value written to RX WPTR: - * the WPTR must be aligned to an 8 descriptor boundary - * - * FIXME: Is this the same on Medford as Huntington? - */ -#define MEDFORD_RX_WPTR_ALIGN 8 - - #ifndef ER_EZ_TX_PIOBUF_SIZE #define ER_EZ_TX_PIOBUF_SIZE 4096 #endif #define MEDFORD_PIOBUF_NBUFS (16) #define MEDFORD_PIOBUF_SIZE (ER_EZ_TX_PIOBUF_SIZE) #define MEDFORD_MIN_PIO_ALLOC_SIZE (MEDFORD_PIOBUF_SIZE / 32) extern __checkReturn efx_rc_t medford_board_cfg( __in efx_nic_t *enp); #ifdef __cplusplus } #endif #endif /* _SYS_MEDFORD_IMPL_H */ Index: projects/clang700-import/sys/dev/sfxge/common/medford_nic.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/medford_nic.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/medford_nic.c (revision 340918) @@ -1,412 +1,413 @@ /*- * Copyright (c) 2015-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
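The PIO buffer constants retained above describe a fixed geometry: 16 buffers of ER_EZ_TX_PIOBUF_SIZE (4096) bytes, carved into minimum allocations of one thirty-second of a buffer. A compile-time restatement of that arithmetic:

#define PIOBUF_NBUFS		16
#define PIOBUF_SIZE		4096
#define MIN_PIO_ALLOC_SIZE	(PIOBUF_SIZE / 32)	/* 128 bytes */

/* Each buffer holds 32 minimum-sized allocations, 512 across the NIC. */
_Static_assert(MIN_PIO_ALLOC_SIZE == 128, "min PIO allocation is 128 bytes");
_Static_assert(PIOBUF_NBUFS * (PIOBUF_SIZE / MIN_PIO_ALLOC_SIZE) == 512,
    "512 minimum allocations in total");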
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_MEDFORD static __checkReturn efx_rc_t efx_mcdi_get_rxdp_config( __in efx_nic_t *enp, __out uint32_t *end_paddingp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN, MC_CMD_GET_RXDP_CONFIG_OUT_LEN)]; uint32_t end_padding; efx_rc_t rc; memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_RXDP_CONFIG; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) { /* RX DMA end padding is disabled */ end_padding = 0; } else { switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) { case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64: end_padding = 64; break; case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128: end_padding = 128; break; case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256: end_padding = 256; break; default: rc = ENOTSUP; goto fail2; } } *end_paddingp = end_padding; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t medford_nic_get_required_pcie_bandwidth( __in efx_nic_t *enp, __out uint32_t *bandwidth_mbpsp) { uint32_t port_modes; uint32_t current_mode; uint32_t bandwidth; efx_rc_t rc; if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, ¤t_mode)) != 0) { /* No port mode info available. */ bandwidth = 0; goto out; } if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode, &bandwidth)) != 0) goto fail1; out: *bandwidth_mbpsp = bandwidth; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t medford_board_cfg( __in efx_nic_t *enp) { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint8_t mac_addr[6] = { 0 }; uint32_t board_type = 0; ef10_link_state_t els; efx_port_t *epp = &(enp->en_port); uint32_t port; uint32_t pf; uint32_t vf; uint32_t mask; uint32_t sysclk, dpcpu_clk; uint32_t base, nvec; uint32_t end_padding; uint32_t bandwidth; efx_rc_t rc; /* * FIXME: Likely to be incomplete and incorrect. * Parts of this should be shared with Huntington. */ if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0) goto fail1; /* * NOTE: The MCDI protocol numbers ports from zero. * The common code MCDI interface numbers ports from one. */ emip->emi_port = port + 1; if ((rc = ef10_external_port_mapping(enp, port, &encp->enc_external_port)) != 0) goto fail2; /* * Get PCIe function number from firmware (used for * per-function privilege and dynamic config info). * - PCIe PF: pf = PF number, vf = 0xffff. * - PCIe VF: pf = parent PF, vf = VF number. */ if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0) goto fail3; encp->enc_pf = pf; encp->enc_vf = vf; /* MAC address for this function */ if (EFX_PCI_FUNCTION_IS_PF(encp)) { rc = efx_mcdi_get_mac_address_pf(enp, mac_addr); #if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC - /* Disable static config checking for Medford NICs, ONLY + /* + * Disable static config checking for Medford NICs, ONLY * for manufacturing test and setup at the factory, to * allow the static config to be installed. 
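efx_mcdi_get_rxdp_config() above reports the RX DMA end-padding granularity: 0 when padding is disabled, otherwise 64, 128 or 256 bytes, with unknown encodings rejected as ENOTSUP. A driver sizing receive buffers would leave room for that tail padding; a hedged sketch using a generic round-up macro rather than any efx-provided one:

#include <stddef.h>

/* Generic power-of-two round-up; not an efx macro. */
#define ROUNDUP2(x, align)	(((x) + (align) - 1) & ~((size_t)(align) - 1))

/* Buffer length needed for a frame of frame_len bytes plus tail padding. */
static size_t
rx_buf_len(size_t frame_len, size_t end_padding)
{
	if (end_padding == 0)
		return (frame_len);
	return (ROUNDUP2(frame_len, end_padding));
}
/* e.g. rx_buf_len(1514, 64) == 1536 and rx_buf_len(1514, 256) == 1536 */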
*/ #else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ if ((rc == 0) && (mac_addr[0] & 0x02)) { /* * If the static config does not include a global MAC * address pool then the board may return a locally * administered MAC address (this should only happen on * incorrectly programmed boards). */ rc = EINVAL; } #endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ } else { rc = efx_mcdi_get_mac_address_vf(enp, mac_addr); } if (rc != 0) goto fail4; EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); /* Board configuration */ rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL); if (rc != 0) { /* Unprivileged functions may not be able to read board cfg */ if (rc == EACCES) board_type = 0; else goto fail5; } encp->enc_board_type = board_type; encp->enc_clk_mult = 1; /* not used for Medford */ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) goto fail6; /* Obtain the default PHY advertised capabilities */ if ((rc = ef10_phy_get_link(enp, &els)) != 0) goto fail7; epp->ep_default_adv_cap_mask = els.els_adv_cap_mask; epp->ep_adv_cap_mask = els.els_adv_cap_mask; /* * Enable firmware workarounds for hardware errata. * Expected responses are: * - 0 (zero): * Success: workaround enabled or disabled as requested. * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP): * Firmware does not support the MC_CMD_WORKAROUND request. * (assume that the workaround is not supported). * - MC_CMD_ERR_ENOENT (reported as ENOENT): * Firmware does not support the requested workaround. * - MC_CMD_ERR_EPERM (reported as EACCES): * Unprivileged function cannot enable/disable workarounds. * * See efx_mcdi_request_errcode() for MCDI error translations. */ if (EFX_PCI_FUNCTION_IS_VF(encp)) { /* * Interrupt testing does not work for VFs. See bug50084. * FIXME: Does this still apply to Medford? */ encp->enc_bug41750_workaround = B_TRUE; } /* Chained multicast is always enabled on Medford */ encp->enc_bug26807_workaround = B_TRUE; /* * If the bug61265 workaround is enabled, then interrupt holdoff timers * cannot be controlled by timer table writes, so MCDI must be used * (timer table writes can still be used for wakeup timers). */ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE, NULL); if ((rc == 0) || (rc == EACCES)) encp->enc_bug61265_workaround = B_TRUE; else if ((rc == ENOTSUP) || (rc == ENOENT)) encp->enc_bug61265_workaround = B_FALSE; else goto fail8; /* Get clock frequencies (in MHz). */ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0) goto fail9; /* * The Medford timer quantum is 1536 dpcpu_clk cycles, documented for * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units. */ encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000; /* Check capabilities of running datapath firmware */ if ((rc = ef10_get_datapath_caps(enp)) != 0) goto fail10; /* Alignment for receive packet DMA buffers */ encp->enc_rx_buf_align_start = 1; /* Get the RX DMA end padding alignment configuration */ if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) { if (rc != EACCES) goto fail11; /* Assume largest tail padding size supported by hardware */ end_padding = 256; } encp->enc_rx_buf_align_end = end_padding; /* Alignment for WPTR updates */ encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN; /* * Maximum number of exclusive RSS contexts which can be allocated. The * hardware supports 64, but 6 are reserved for shared contexts. 
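The timer arithmetic in medford_board_cfg() above converts the quantum of 1536 DPCPU clock cycles into nanoseconds and then derives the longest programmable moderation interval from the timer field width. Worked through with a hypothetical 1000 MHz DPCPU clock and an assumed 16-bit timer-value field:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t dpcpu_clk = 1000;		/* MHz; hypothetical */
	unsigned int timer_val_width = 16;	/* assumed field width */

	/* 1536 cycles at dpcpu_clk MHz, expressed in nanoseconds. */
	uint32_t quantum_ns = 1536000UL / dpcpu_clk;	/* 1536 ns */

	/* Largest interval: quantum * 2^width, reported in microseconds. */
	uint32_t max_us = ((uint64_t)quantum_ns << timer_val_width) / 1000;

	printf("%u ns quantum, %u us max\n", quantum_ns, max_us);
	/* prints: 1536 ns quantum, 100663 us max */
	return (0);
}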
They * are a global resource so not all may be available. */ encp->enc_rx_scale_max_exclusive_contexts = 58; encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT); /* No boundary crossing limits */ encp->enc_tx_dma_desc_boundary = 0; /* * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available * resources (allocated to this PCIe function), which is zero until * after we have allocated VIs. */ encp->enc_evq_limit = 1024; encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET; encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET; /* * The maximum supported transmit queue size is 2048. TXQs with 4096 * descriptors are not supported as the top bit is used for vfifo * stuffing. */ encp->enc_txq_max_ndescs = 2048; encp->enc_buftbl_limit = 0xFFFFFFFF; encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS; encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE; encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE; /* * Get the current privilege mask. Note that this may be modified * dynamically, so this value is informational only. DO NOT use * the privilege mask to check for sufficient privileges, as that * can result in time-of-check/time-of-use bugs. */ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0) goto fail12; encp->enc_privilege_mask = mask; /* Get interrupt vector limits */ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) { if (EFX_PCI_FUNCTION_IS_PF(encp)) goto fail13; /* Ignore error (cannot query vector limits from a VF). */ base = 0; nvec = 1024; } encp->enc_intr_vec_base = base; encp->enc_intr_limit = nvec; /* * Maximum number of bytes into the frame the TCP header can start for * firmware assisted TSO to work. */ encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT; /* * Medford stores a single global copy of VPD, not per-PF as on * Huntington. */ encp->enc_vpd_is_global = B_TRUE; rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth); if (rc != 0) goto fail14; encp->enc_required_pcie_bandwidth_mbps = bandwidth; encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3; return (0); fail14: EFSYS_PROBE(fail14); fail13: EFSYS_PROBE(fail13); fail12: EFSYS_PROBE(fail12); fail11: EFSYS_PROBE(fail11); fail10: EFSYS_PROBE(fail10); fail9: EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_MEDFORD */ Index: projects/clang700-import/sys/dev/sfxge/common/siena_impl.h =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/siena_impl.h (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/siena_impl.h (revision 340918) @@ -1,437 +1,455 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. 
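medford_board_cfg() above caps enc_txq_max_ndescs at 2048 because the top bit of a 4096-entry ring index is reserved for vfifo stuffing. A sketch of the kind of validation a queue-creation path might apply against that limit; the power-of-two requirement is an assumption about ring geometry, not something stated in this hunk:

#include <stdint.h>
#include <errno.h>

/* Reject ring sizes the datapath cannot support. */
static int
check_txq_ndescs(uint32_t ndescs, uint32_t max_ndescs)
{
	/* Ring sizes are assumed to be powers of two. */
	if (ndescs == 0 || (ndescs & (ndescs - 1)) != 0)
		return (EINVAL);
	if (ndescs > max_ndescs)	/* 2048 on Medford, per above */
		return (EINVAL);
	return (0);
}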
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. * * $FreeBSD$ */ #ifndef _SYS_SIENA_IMPL_H #define _SYS_SIENA_IMPL_H #include "efx.h" #include "efx_regs.h" #include "efx_mcdi.h" #include "siena_flash.h" #ifdef __cplusplus extern "C" { #endif +#ifndef EFX_TXQ_DC_SIZE +#define EFX_TXQ_DC_SIZE 1 /* 16 descriptors */ +#endif +#ifndef EFX_RXQ_DC_SIZE +#define EFX_RXQ_DC_SIZE 3 /* 64 descriptors */ +#endif +#define EFX_TXQ_DC_NDESCS(_dcsize) (8 << (_dcsize)) + #define SIENA_NVRAM_CHUNK 0x80 + extern __checkReturn efx_rc_t siena_nic_probe( __in efx_nic_t *enp); extern __checkReturn efx_rc_t siena_nic_reset( __in efx_nic_t *enp); extern __checkReturn efx_rc_t siena_nic_init( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG + +extern efx_sram_pattern_fn_t __efx_sram_pattern_fns[]; + +typedef struct siena_register_set_s { + unsigned int address; + unsigned int step; + unsigned int rows; + efx_oword_t mask; +} siena_register_set_t; extern __checkReturn efx_rc_t siena_nic_register_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern void siena_nic_fini( __in efx_nic_t *enp); extern void siena_nic_unprobe( __in efx_nic_t *enp); #define SIENA_SRAM_ROWS 0x12000 extern void siena_sram_init( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG extern __checkReturn efx_rc_t siena_sram_test( __in efx_nic_t *enp, __in efx_sram_pattern_fn_t func); #endif /* EFSYS_OPT_DIAG */ #if EFSYS_OPT_MCDI extern __checkReturn efx_rc_t siena_mcdi_init( __in efx_nic_t *enp, __in const efx_mcdi_transport_t *mtp); extern void siena_mcdi_send_request( __in efx_nic_t *enp, __in_bcount(hdr_len) void *hdrp, __in size_t hdr_len, __in_bcount(sdu_len) void *sdup, __in size_t sdu_len); extern __checkReturn boolean_t siena_mcdi_poll_response( __in efx_nic_t *enp); extern void siena_mcdi_read_response( __in efx_nic_t *enp, __out_bcount(length) void *bufferp, __in size_t offset, __in size_t length); extern efx_rc_t siena_mcdi_poll_reboot( __in efx_nic_t *enp); extern void siena_mcdi_fini( __in efx_nic_t *enp); extern __checkReturn efx_rc_t siena_mcdi_feature_supported( __in efx_nic_t *enp, __in efx_mcdi_feature_id_t id, __out boolean_t *supportedp); extern void siena_mcdi_get_timeout( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __out uint32_t *timeoutp); #endif /* EFSYS_OPT_MCDI */ #if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD extern __checkReturn efx_rc_t siena_nvram_partn_lock( __in efx_nic_t *enp, __in uint32_t partn); extern __checkReturn efx_rc_t siena_nvram_partn_unlock( __in efx_nic_t *enp, __in uint32_t partn, __out_opt uint32_t *verify_resultp); extern __checkReturn efx_rc_t siena_nvram_get_dynamic_cfg( __in 
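The new descriptor-cache defines above encode sizes as log2 values over a base of 8: EFX_TXQ_DC_NDESCS(_dcsize) expands to 8 << _dcsize, so the TX setting of 1 gives 16 descriptors and the RX setting of 3 gives 64, matching the inline comments. A restatement (an RX macro of the same shape is assumed; only the TX one is visible in this hunk):

#define TXQ_DC_SIZE		1	/* 16 descriptors */
#define RXQ_DC_SIZE		3	/* 64 descriptors */
#define DC_NDESCS(_dcsize)	(8 << (_dcsize))

_Static_assert(DC_NDESCS(TXQ_DC_SIZE) == 16, "TX descriptor cache");
_Static_assert(DC_NDESCS(RXQ_DC_SIZE) == 64, "RX descriptor cache");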
efx_nic_t *enp, __in uint32_t partn, __in boolean_t vpd, __out siena_mc_dynamic_config_hdr_t **dcfgp, __out size_t *sizep); #endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ #if EFSYS_OPT_NVRAM #if EFSYS_OPT_DIAG extern __checkReturn efx_rc_t siena_nvram_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern __checkReturn efx_rc_t siena_nvram_get_subtype( __in efx_nic_t *enp, __in uint32_t partn, __out uint32_t *subtypep); extern __checkReturn efx_rc_t siena_nvram_type_to_partn( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *partnp); extern __checkReturn efx_rc_t siena_nvram_partn_size( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *sizep); extern __checkReturn efx_rc_t siena_nvram_partn_rw_start( __in efx_nic_t *enp, __in uint32_t partn, __out size_t *chunk_sizep); extern __checkReturn efx_rc_t siena_nvram_partn_read( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t siena_nvram_partn_erase( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __in size_t size); extern __checkReturn efx_rc_t siena_nvram_partn_write( __in efx_nic_t *enp, __in uint32_t partn, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t siena_nvram_partn_rw_finish( __in efx_nic_t *enp, __in uint32_t partn, __out_opt uint32_t *verify_resultp); extern __checkReturn efx_rc_t siena_nvram_partn_get_version( __in efx_nic_t *enp, __in uint32_t partn, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]); extern __checkReturn efx_rc_t siena_nvram_partn_set_version( __in efx_nic_t *enp, __in uint32_t partn, __in_ecount(4) uint16_t version[4]); #endif /* EFSYS_OPT_NVRAM */ #if EFSYS_OPT_VPD extern __checkReturn efx_rc_t siena_vpd_init( __in efx_nic_t *enp); extern __checkReturn efx_rc_t siena_vpd_size( __in efx_nic_t *enp, __out size_t *sizep); extern __checkReturn efx_rc_t siena_vpd_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t siena_vpd_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t siena_vpd_reinit( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t siena_vpd_get( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __inout efx_vpd_value_t *evvp); extern __checkReturn efx_rc_t siena_vpd_set( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp); extern __checkReturn efx_rc_t siena_vpd_next( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_value_t *evvp, __inout unsigned int *contp); extern __checkReturn efx_rc_t siena_vpd_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern void siena_vpd_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_VPD */ typedef struct siena_link_state_s { uint32_t sls_adv_cap_mask; uint32_t sls_lp_cap_mask; unsigned int sls_fcntl; efx_link_mode_t sls_link_mode; #if EFSYS_OPT_LOOPBACK efx_loopback_type_t sls_loopback; #endif boolean_t sls_mac_up; } siena_link_state_t; extern void siena_phy_link_ev( __in efx_nic_t *enp, __in efx_qword_t *eqp, __out efx_link_mode_t *link_modep); extern __checkReturn efx_rc_t siena_phy_get_link( __in efx_nic_t *enp, __out siena_link_state_t *slsp); extern __checkReturn efx_rc_t siena_phy_power( __in efx_nic_t *enp, __in boolean_t on); extern __checkReturn 
efx_rc_t siena_phy_reconfigure( __in efx_nic_t *enp); extern __checkReturn efx_rc_t siena_phy_verify( __in efx_nic_t *enp); extern __checkReturn efx_rc_t siena_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip); #if EFSYS_OPT_PHY_STATS extern void siena_phy_decode_stats( __in efx_nic_t *enp, __in uint32_t vmask, __in_opt efsys_mem_t *esmp, __out_opt uint64_t *smaskp, __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat); extern __checkReturn efx_rc_t siena_phy_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat); #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_BIST extern __checkReturn efx_rc_t siena_phy_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type); extern __checkReturn efx_rc_t siena_phy_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t *resultp, __out_opt __drv_when(count > 0, __notnull) uint32_t *value_maskp, __out_ecount_opt(count) __drv_when(count > 0, __notnull) unsigned long *valuesp, __in size_t count); extern void siena_phy_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type); #endif /* EFSYS_OPT_BIST */ extern __checkReturn efx_rc_t siena_mac_poll( __in efx_nic_t *enp, __out efx_link_mode_t *link_modep); extern __checkReturn efx_rc_t siena_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp); extern __checkReturn efx_rc_t siena_mac_reconfigure( __in efx_nic_t *enp); extern __checkReturn efx_rc_t siena_mac_pdu_get( __in efx_nic_t *enp, __out size_t *pdu); #if EFSYS_OPT_LOOPBACK extern __checkReturn efx_rc_t siena_mac_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t loopback_type); #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_MAC_STATS extern __checkReturn efx_rc_t siena_mac_stats_get_mask( __in efx_nic_t *enp, __inout_bcount(mask_size) uint32_t *maskp, __in size_t mask_size); extern __checkReturn efx_rc_t siena_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp); #endif /* EFSYS_OPT_MAC_STATS */ #ifdef __cplusplus } #endif #endif /* _SYS_SIENA_IMPL_H */ Index: projects/clang700-import/sys/dev/sfxge/common/siena_nic.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/siena_nic.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/siena_nic.c (revision 340918) @@ -1,593 +1,800 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #include "mcdi_mon.h" #if EFSYS_OPT_SIENA #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM static __checkReturn efx_rc_t siena_nic_get_partn_mask( __in efx_nic_t *enp, __out unsigned int *maskp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_NVRAM_TYPES_IN_LEN, MC_CMD_NVRAM_TYPES_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_NVRAM_TYPES; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_NVRAM_TYPES_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_NVRAM_TYPES_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_NVRAM_TYPES_OUT_LEN) { rc = EMSGSIZE; goto fail2; } *maskp = MCDI_OUT_DWORD(req, NVRAM_TYPES_OUT_TYPES); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */ static __checkReturn efx_rc_t siena_board_cfg( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint8_t mac_addr[6]; efx_dword_t capabilities; uint32_t board_type; uint32_t nevq, nrxq, ntxq; efx_rc_t rc; /* External port identifier using one-based port numbering */ encp->enc_external_port = (uint8_t)enp->en_mcdi.em_emip.emi_port; /* Board configuration */ if ((rc = efx_mcdi_get_board_cfg(enp, &board_type, &capabilities, mac_addr)) != 0) goto fail1; EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); encp->enc_board_type = board_type; /* * There is no possibility to determine the number of PFs on Siena * by issuing MCDI request, and it is not an easy task to find the * value based on the board type, so 'enc_hw_pf_count' is set to 1 */ encp->enc_hw_pf_count = 1; /* Additional capabilities */ encp->enc_clk_mult = 1; if (EFX_DWORD_FIELD(capabilities, MC_CMD_CAPABILITIES_TURBO)) { enp->en_features |= EFX_FEATURE_TURBO; if (EFX_DWORD_FIELD(capabilities, MC_CMD_CAPABILITIES_TURBO_ACTIVE)) { encp->enc_clk_mult = 2; } } encp->enc_evq_timer_quantum_ns = EFX_EVQ_SIENA_TIMER_QUANTUM_NS / encp->enc_clk_mult; encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000; /* When hash header insertion is enabled, Siena inserts 16 bytes */ encp->enc_rx_prefix_size = 16; /* Alignment for receive packet DMA buffers */ encp->enc_rx_buf_align_start = 1; encp->enc_rx_buf_align_end = 1; /* Alignment for WPTR updates */ encp->enc_rx_push_align = 1; /* There is one RSS context per function */ encp->enc_rx_scale_max_exclusive_contexts = 1; encp->enc_tx_dma_desc_size_max = EFX_MASK32(FSF_AZ_TX_KER_BYTE_COUNT); /* Fragments must not span 4k boundaries. 
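siena_nic_get_partn_mask() above returns a bitmask with one bit per NVRAM partition type reported by MC_CMD_NVRAM_TYPES. A minimal sketch of consuming that mask, with a hypothetical handle_partition() callback:

#include <stdint.h>

/* Hypothetical per-partition hook. */
extern void handle_partition(unsigned int partn);

/* Visit every partition type whose bit is set in the mask. */
static void
for_each_partition(uint32_t partn_mask)
{
	unsigned int partn;

	for (partn = 0; partn < 32; partn++) {
		if (partn_mask & (1u << partn))
			handle_partition(partn);
	}
}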
*/ encp->enc_tx_dma_desc_boundary = 4096; /* Resource limits */ rc = efx_mcdi_get_resource_limits(enp, &nevq, &nrxq, &ntxq); if (rc != 0) { if (rc != ENOTSUP) goto fail2; nevq = 1024; nrxq = EFX_RXQ_LIMIT_TARGET; ntxq = EFX_TXQ_LIMIT_TARGET; } encp->enc_evq_limit = nevq; encp->enc_rxq_limit = MIN(EFX_RXQ_LIMIT_TARGET, nrxq); encp->enc_txq_limit = MIN(EFX_TXQ_LIMIT_TARGET, ntxq); encp->enc_txq_max_ndescs = 4096; encp->enc_buftbl_limit = SIENA_SRAM_ROWS - (encp->enc_txq_limit * EFX_TXQ_DC_NDESCS(EFX_TXQ_DC_SIZE)) - (encp->enc_rxq_limit * EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE)); encp->enc_hw_tx_insert_vlan_enabled = B_FALSE; encp->enc_fw_assisted_tso_enabled = B_FALSE; encp->enc_fw_assisted_tso_v2_enabled = B_FALSE; encp->enc_fw_assisted_tso_v2_n_contexts = 0; encp->enc_allow_set_mac_with_installed_filters = B_TRUE; encp->enc_rx_packed_stream_supported = B_FALSE; encp->enc_rx_var_packed_stream_supported = B_FALSE; /* Siena supports two 10G ports, and 8 lanes of PCIe Gen2 */ encp->enc_required_pcie_bandwidth_mbps = 2 * 10000; encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN2; encp->enc_nvram_update_verify_result_supported = B_FALSE; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn efx_rc_t siena_phy_cfg( __in efx_nic_t *enp) { +#if EFSYS_OPT_PHY_STATS efx_nic_cfg_t *encp = &(enp->en_nic_cfg); +#endif /* EFSYS_OPT_PHY_STATS */ efx_rc_t rc; /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) goto fail1; #if EFSYS_OPT_PHY_STATS /* Convert the MCDI statistic mask into the EFX_PHY_STAT mask */ siena_phy_decode_stats(enp, encp->enc_mcdi_phy_stat_mask, NULL, &encp->enc_phy_stat_mask, NULL); #endif /* EFSYS_OPT_PHY_STATS */ return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } +#define SIENA_BIU_MAGIC0 0x01234567 +#define SIENA_BIU_MAGIC1 0xfedcba98 + +static __checkReturn efx_rc_t +siena_nic_biu_test( + __in efx_nic_t *enp) +{ + efx_oword_t oword; + efx_rc_t rc; + + /* + * Write magic values to scratch registers 0 and 1, then + * verify that the values were written correctly. Interleave + * the accesses to ensure that the BIU is not just reading + * back the cached value that was last written. + */ + EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC0); + EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); + + EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC1); + EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); + + EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); + if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC0) { + rc = EIO; + goto fail1; + } + + EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); + if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC1) { + rc = EIO; + goto fail2; + } + + /* + * Perform the same test, with the values swapped. This + * ensures that subsequent tests don't start with the correct + * values already written into the scratch registers. 
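The enc_buftbl_limit computation above budgets Siena's SRAM: of SIENA_SRAM_ROWS (0x12000 = 73728) rows, each TX queue reserves rows for a 16-descriptor cache and each RX queue for a 64-descriptor cache, and the remainder is available as buffer-table entries. Worked through with hypothetical queue limits of 64 each:

#include <stdio.h>

#define SRAM_ROWS	0x12000u	/* 73728, from SIENA_SRAM_ROWS */

int
main(void)
{
	unsigned int txq_limit = 64, rxq_limit = 64;	/* hypothetical */
	unsigned int txq_dc = 16, rxq_dc = 64;		/* cache sizes above */

	unsigned int buftbl = SRAM_ROWS -
	    (txq_limit * txq_dc) - (rxq_limit * rxq_dc);

	printf("%u rows left for the buffer table\n", buftbl);
	/* 73728 - 1024 - 4096 = 68608 */
	return (0);
}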
+ */ + EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC1); + EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); + + EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, SIENA_BIU_MAGIC0); + EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); + + EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); + if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC1) { + rc = EIO; + goto fail3; + } + + EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); + if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != SIENA_BIU_MAGIC0) { + rc = EIO; + goto fail4; + } + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + __checkReturn efx_rc_t siena_nic_probe( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); siena_link_state_t sls; unsigned int mask; efx_oword_t oword; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); /* Test BIU */ - if ((rc = efx_nic_biu_test(enp)) != 0) + if ((rc = siena_nic_biu_test(enp)) != 0) goto fail1; /* Clear the region register */ EFX_POPULATE_OWORD_4(oword, FRF_AZ_ADR_REGION0, 0, FRF_AZ_ADR_REGION1, (1 << 16), FRF_AZ_ADR_REGION2, (2 << 16), FRF_AZ_ADR_REGION3, (3 << 16)); EFX_BAR_WRITEO(enp, FR_AZ_ADR_REGION_REG, &oword); /* Read clear any assertion state */ if ((rc = efx_mcdi_read_assertion(enp)) != 0) goto fail2; /* Exit the assertion handler */ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) goto fail3; /* Wrestle control from the BMC */ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0) goto fail4; if ((rc = siena_board_cfg(enp)) != 0) goto fail5; if ((rc = siena_phy_cfg(enp)) != 0) goto fail6; /* Obtain the default PHY advertised capabilities */ if ((rc = siena_nic_reset(enp)) != 0) goto fail7; if ((rc = siena_phy_get_link(enp, &sls)) != 0) goto fail8; epp->ep_default_adv_cap_mask = sls.sls_adv_cap_mask; epp->ep_adv_cap_mask = sls.sls_adv_cap_mask; #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM if ((rc = siena_nic_get_partn_mask(enp, &mask)) != 0) goto fail9; enp->en_u.siena.enu_partn_mask = mask; #endif #if EFSYS_OPT_MAC_STATS /* Wipe the MAC statistics */ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0) goto fail10; #endif #if EFSYS_OPT_LOOPBACK if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0) goto fail11; #endif #if EFSYS_OPT_MON_STATS if ((rc = mcdi_mon_cfg_build(enp)) != 0) goto fail12; #endif encp->enc_features = enp->en_features; return (0); #if EFSYS_OPT_MON_STATS fail12: EFSYS_PROBE(fail12); #endif #if EFSYS_OPT_LOOPBACK fail11: EFSYS_PROBE(fail11); #endif #if EFSYS_OPT_MAC_STATS fail10: EFSYS_PROBE(fail10); #endif #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM fail9: EFSYS_PROBE(fail9); #endif fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t siena_nic_reset( __in efx_nic_t *enp) { efx_mcdi_req_t req; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); /* siena_nic_reset() is called to recover from BADASSERT failures. */ if ((rc = efx_mcdi_read_assertion(enp)) != 0) goto fail1; if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) goto fail2; /* * Bug24908: ENTITY_RESET_IN_LEN is non zero but zero may be supplied * for backwards compatibility with PORT_RESET_IN_LEN. 
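siena_nic_biu_test() above checks the bus interface by writing two distinct magic values to two scratch registers, reading both back, then repeating with the values swapped. Interleaving the two registers defeats a cached read-back, and the swap ensures a later run never starts with the expected values already in place. The same shape, distilled over hypothetical 32-bit scratch-register accessors:

#include <stdint.h>
#include <errno.h>

#define MAGIC0	0x01234567u
#define MAGIC1	0xfedcba98u

/* Hypothetical scratch-register accessors. */
extern void scratch_write(unsigned int idx, uint32_t val);
extern uint32_t scratch_read(unsigned int idx);

static int
scratch_selftest(void)
{
	int pass;

	/* Pass 0 writes MAGIC0/MAGIC1; pass 1 writes them swapped. */
	for (pass = 0; pass < 2; pass++) {
		uint32_t a = pass ? MAGIC1 : MAGIC0;
		uint32_t b = pass ? MAGIC0 : MAGIC1;

		scratch_write(0, a);	/* interleave the two registers */
		scratch_write(1, b);
		if (scratch_read(0) != a || scratch_read(1) != b)
			return (EIO);
	}
	return (0);
}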
*/ EFX_STATIC_ASSERT(MC_CMD_ENTITY_RESET_OUT_LEN == 0); req.emr_cmd = MC_CMD_ENTITY_RESET; req.emr_in_buf = NULL; req.emr_in_length = 0; req.emr_out_buf = NULL; req.emr_out_length = 0; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (0); } static void siena_nic_rx_cfg( __in efx_nic_t *enp) { efx_oword_t oword; /* * RX_INGR_EN is always enabled on Siena, because we rely on * the RX parser to be resiliant to missing SOP/EOP. */ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_INGR_EN, 1); EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword); /* Disable parsing of additional 802.1Q in Q packets */ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES, 0); EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); } static void siena_nic_usrev_dis( __in efx_nic_t *enp) { efx_oword_t oword; EFX_POPULATE_OWORD_1(oword, FRF_CZ_USREV_DIS, 1); EFX_BAR_WRITEO(enp, FR_CZ_USR_EV_CFG, &oword); } __checkReturn efx_rc_t siena_nic_init( __in efx_nic_t *enp) { efx_rc_t rc; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); /* Enable reporting of some events (e.g. link change) */ if ((rc = efx_mcdi_log_ctrl(enp)) != 0) goto fail1; siena_sram_init(enp); /* Configure Siena's RX block */ siena_nic_rx_cfg(enp); /* Disable USR_EVents for now */ siena_nic_usrev_dis(enp); /* bug17057: Ensure set_link is called */ if ((rc = siena_phy_reconfigure(enp)) != 0) goto fail2; enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V1; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void siena_nic_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } void siena_nic_unprobe( __in efx_nic_t *enp) { #if EFSYS_OPT_MON_STATS mcdi_mon_cfg_free(enp); #endif /* EFSYS_OPT_MON_STATS */ (void) efx_mcdi_drv_attach(enp, B_FALSE); } #if EFSYS_OPT_DIAG -static efx_register_set_t __siena_registers[] = { +static siena_register_set_t __siena_registers[] = { { FR_AZ_ADR_REGION_REG_OFST, 0, 1 }, { FR_CZ_USR_EV_CFG_OFST, 0, 1 }, { FR_AZ_RX_CFG_REG_OFST, 0, 1 }, { FR_AZ_TX_CFG_REG_OFST, 0, 1 }, { FR_AZ_TX_RESERVED_REG_OFST, 0, 1 }, { FR_AZ_SRM_TX_DC_CFG_REG_OFST, 0, 1 }, { FR_AZ_RX_DC_CFG_REG_OFST, 0, 1 }, { FR_AZ_RX_DC_PF_WM_REG_OFST, 0, 1 }, { FR_AZ_DP_CTRL_REG_OFST, 0, 1 }, { FR_BZ_RX_RSS_TKEY_REG_OFST, 0, 1}, { FR_CZ_RX_RSS_IPV6_REG1_OFST, 0, 1}, { FR_CZ_RX_RSS_IPV6_REG2_OFST, 0, 1}, { FR_CZ_RX_RSS_IPV6_REG3_OFST, 0, 1} }; static const uint32_t __siena_register_masks[] = { 0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x000103FF, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000, 0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF, 0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF, 0x001FFFFF, 0x00000000, 0x00000000, 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x000003FF, 0x00000000, 0x00000000, 0x00000000, 0x00000FFF, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000 }; -static efx_register_set_t __siena_tables[] = { +static siena_register_set_t __siena_tables[] = { { FR_AZ_RX_FILTER_TBL0_OFST, FR_AZ_RX_FILTER_TBL0_STEP, FR_AZ_RX_FILTER_TBL0_ROWS }, { FR_CZ_RX_MAC_FILTER_TBL0_OFST, FR_CZ_RX_MAC_FILTER_TBL0_STEP, 
FR_CZ_RX_MAC_FILTER_TBL0_ROWS }, { FR_AZ_RX_DESC_PTR_TBL_OFST, FR_AZ_RX_DESC_PTR_TBL_STEP, FR_CZ_RX_DESC_PTR_TBL_ROWS }, { FR_AZ_TX_DESC_PTR_TBL_OFST, FR_AZ_TX_DESC_PTR_TBL_STEP, FR_CZ_TX_DESC_PTR_TBL_ROWS }, { FR_AZ_TIMER_TBL_OFST, FR_AZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS }, { FR_CZ_TX_FILTER_TBL0_OFST, FR_CZ_TX_FILTER_TBL0_STEP, FR_CZ_TX_FILTER_TBL0_ROWS }, { FR_CZ_TX_MAC_FILTER_TBL0_OFST, FR_CZ_TX_MAC_FILTER_TBL0_STEP, FR_CZ_TX_MAC_FILTER_TBL0_ROWS } }; static const uint32_t __siena_table_masks[] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000003FF, 0xFFFF0FFF, 0xFFFFFFFF, 0x00000E7F, 0x00000000, 0xFFFFFFFE, 0x0FFFFFFF, 0x01800000, 0x00000000, 0xFFFFFFFE, 0x0FFFFFFF, 0x0C000000, 0x00000000, 0x3FFFFFFF, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000013FF, 0xFFFF07FF, 0xFFFFFFFF, 0x0000007F, 0x00000000, }; __checkReturn efx_rc_t +siena_nic_test_registers( + __in efx_nic_t *enp, + __in siena_register_set_t *rsp, + __in size_t count) +{ + unsigned int bit; + efx_oword_t original; + efx_oword_t reg; + efx_oword_t buf; + efx_rc_t rc; + + while (count > 0) { + /* This function is only suitable for registers */ + EFSYS_ASSERT(rsp->rows == 1); + + /* bit sweep on and off */ + EFSYS_BAR_READO(enp->en_esbp, rsp->address, &original, + B_TRUE); + for (bit = 0; bit < 128; bit++) { + /* Is this bit in the mask? */ + if (~(rsp->mask.eo_u32[bit >> 5]) & (1 << bit)) + continue; + + /* Test this bit can be set in isolation */ + reg = original; + EFX_AND_OWORD(reg, rsp->mask); + EFX_SET_OWORD_BIT(reg, bit); + + EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, ®, + B_TRUE); + EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf, + B_TRUE); + + EFX_AND_OWORD(buf, rsp->mask); + if (memcmp(®, &buf, sizeof (reg))) { + rc = EIO; + goto fail1; + } + + /* Test this bit can be cleared in isolation */ + EFX_OR_OWORD(reg, rsp->mask); + EFX_CLEAR_OWORD_BIT(reg, bit); + + EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, ®, + B_TRUE); + EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf, + B_TRUE); + + EFX_AND_OWORD(buf, rsp->mask); + if (memcmp(®, &buf, sizeof (reg))) { + rc = EIO; + goto fail2; + } + } + + /* Restore the old value */ + EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, + B_TRUE); + + --count; + ++rsp; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + /* Restore the old value */ + EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE); + + return (rc); +} + + __checkReturn efx_rc_t +siena_nic_test_tables( + __in efx_nic_t *enp, + __in siena_register_set_t *rsp, + __in efx_pattern_type_t pattern, + __in size_t count) +{ + efx_sram_pattern_fn_t func; + unsigned int index; + unsigned int address; + efx_oword_t reg; + efx_oword_t buf; + efx_rc_t rc; + + EFSYS_ASSERT(pattern < EFX_PATTERN_NTYPES); + func = __efx_sram_pattern_fns[pattern]; + + while (count > 0) { + /* Write */ + address = rsp->address; + for (index = 0; index < rsp->rows; ++index) { + func(2 * index + 0, B_FALSE, ®.eo_qword[0]); + func(2 * index + 1, B_FALSE, ®.eo_qword[1]); + EFX_AND_OWORD(reg, rsp->mask); + EFSYS_BAR_WRITEO(enp->en_esbp, address, ®, B_TRUE); + + address += rsp->step; + } + + /* Read */ + address = rsp->address; + for (index = 0; index < rsp->rows; ++index) { + func(2 * index + 0, B_FALSE, ®.eo_qword[0]); + func(2 * index + 1, B_FALSE, ®.eo_qword[1]); + EFX_AND_OWORD(reg, rsp->mask); + EFSYS_BAR_READO(enp->en_esbp, address, &buf, B_TRUE); + if (memcmp(®, &buf, sizeof (reg))) { + rc = EIO; + goto fail1; + } + + address += rsp->step; + } + + 
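The bit sweep in siena_nic_test_registers() above walks all 128 bits of an oword, skipping any bit outside the register's writable mask before testing that it can be set and cleared in isolation. The mask lookup restated on its own; the explicit & 0x1f keeps the shift count in range for all 128 bit positions:

#include <stdint.h>
#include <stdbool.h>

/* Is bit 'bit' (0..127) set in a 128-bit mask stored as four dwords? */
static bool
mask128_test(const uint32_t mask[4], unsigned int bit)
{
	return ((mask[bit >> 5] & (1u << (bit & 0x1f))) != 0);
}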
++rsp; + --count; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + __checkReturn efx_rc_t siena_nic_register_test( __in efx_nic_t *enp) { - efx_register_set_t *rsp; + siena_register_set_t *rsp; const uint32_t *dwordp; unsigned int nitems; unsigned int count; efx_rc_t rc; /* Fill out the register mask entries */ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_register_masks) == EFX_ARRAY_SIZE(__siena_registers) * 4); nitems = EFX_ARRAY_SIZE(__siena_registers); dwordp = __siena_register_masks; for (count = 0; count < nitems; ++count) { rsp = __siena_registers + count; rsp->mask.eo_u32[0] = *dwordp++; rsp->mask.eo_u32[1] = *dwordp++; rsp->mask.eo_u32[2] = *dwordp++; rsp->mask.eo_u32[3] = *dwordp++; } /* Fill out the register table entries */ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_table_masks) == EFX_ARRAY_SIZE(__siena_tables) * 4); nitems = EFX_ARRAY_SIZE(__siena_tables); dwordp = __siena_table_masks; for (count = 0; count < nitems; ++count) { rsp = __siena_tables + count; rsp->mask.eo_u32[0] = *dwordp++; rsp->mask.eo_u32[1] = *dwordp++; rsp->mask.eo_u32[2] = *dwordp++; rsp->mask.eo_u32[3] = *dwordp++; } - if ((rc = efx_nic_test_registers(enp, __siena_registers, + if ((rc = siena_nic_test_registers(enp, __siena_registers, EFX_ARRAY_SIZE(__siena_registers))) != 0) goto fail1; - if ((rc = efx_nic_test_tables(enp, __siena_tables, + if ((rc = siena_nic_test_tables(enp, __siena_tables, EFX_PATTERN_BYTE_ALTERNATE, EFX_ARRAY_SIZE(__siena_tables))) != 0) goto fail2; - if ((rc = efx_nic_test_tables(enp, __siena_tables, + if ((rc = siena_nic_test_tables(enp, __siena_tables, EFX_PATTERN_BYTE_CHANGING, EFX_ARRAY_SIZE(__siena_tables))) != 0) goto fail3; - if ((rc = efx_nic_test_tables(enp, __siena_tables, + if ((rc = siena_nic_test_tables(enp, __siena_tables, EFX_PATTERN_BIT_SWEEP, EFX_ARRAY_SIZE(__siena_tables))) != 0) goto fail4; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_DIAG */ #endif /* EFSYS_OPT_SIENA */ Index: projects/clang700-import/sys/dev/sfxge/common/siena_phy.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/common/siena_phy.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/common/siena_phy.c (revision 340918) @@ -1,802 +1,804 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_SIENA static void siena_phy_decode_cap( __in uint32_t mcdi_cap, __out uint32_t *maskp) { uint32_t mask; mask = 0; if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) mask |= (1 << EFX_PHY_CAP_10HDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) mask |= (1 << EFX_PHY_CAP_10FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) mask |= (1 << EFX_PHY_CAP_100HDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) mask |= (1 << EFX_PHY_CAP_100FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) mask |= (1 << EFX_PHY_CAP_1000HDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_1000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_10000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) mask |= (1 << EFX_PHY_CAP_PAUSE); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) mask |= (1 << EFX_PHY_CAP_ASYM); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) mask |= (1 << EFX_PHY_CAP_AN); *maskp = mask; } static void siena_phy_decode_link_mode( __in efx_nic_t *enp, __in uint32_t link_flags, __in unsigned int speed, __in unsigned int fcntl, __out efx_link_mode_t *link_modep, __out unsigned int *fcntlp) { boolean_t fd = !!(link_flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN)); boolean_t up = !!(link_flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN)); _NOTE(ARGUNUSED(enp)) if (!up) *link_modep = EFX_LINK_DOWN; else if (speed == 10000 && fd) *link_modep = EFX_LINK_10000FDX; else if (speed == 1000) *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX; else if (speed == 100) *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX; else if (speed == 10) *link_modep = fd ? 
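siena_phy_decode_cap() above translates each MCDI PHY capability bit into the corresponding EFX_PHY_CAP_* bit with a chain of explicit tests. Where the source and destination bit numbers differ, a table keeps the mapping in one place; a sketch with bit positions invented for illustration (the real LBN and CAP values live in the headers):

#include <stdint.h>
#include <stddef.h>

static const struct {
	unsigned int mcdi_lbn;	/* source bit in the MCDI word */
	unsigned int efx_cap;	/* destination bit in the EFX mask */
} cap_map[] = {
	{ 1, 0 },	/* e.g. 10HDX */
	{ 2, 1 },	/* e.g. 10FDX */
	{ 3, 2 },	/* e.g. 100HDX */
};

static uint32_t
decode_cap(uint32_t mcdi_cap)
{
	uint32_t mask = 0;
	size_t i;

	for (i = 0; i < sizeof (cap_map) / sizeof (cap_map[0]); i++) {
		if (mcdi_cap & (1u << cap_map[i].mcdi_lbn))
			mask |= (1u << cap_map[i].efx_cap);
	}
	return (mask);
}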
EFX_LINK_10FDX : EFX_LINK_10HDX; else *link_modep = EFX_LINK_UNKNOWN; if (fcntl == MC_CMD_FCNTL_OFF) *fcntlp = 0; else if (fcntl == MC_CMD_FCNTL_RESPOND) *fcntlp = EFX_FCNTL_RESPOND; else if (fcntl == MC_CMD_FCNTL_BIDIR) *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; else { EFSYS_PROBE1(mc_pcol_error, int, fcntl); *fcntlp = 0; } } void siena_phy_link_ev( __in efx_nic_t *enp, __in efx_qword_t *eqp, __out efx_link_mode_t *link_modep) { efx_port_t *epp = &(enp->en_port); unsigned int link_flags; unsigned int speed; unsigned int fcntl; efx_link_mode_t link_mode; uint32_t lp_cap_mask; /* * Convert the LINKCHANGE speed enumeration into mbit/s, in the * same way as GET_LINK encodes the speed */ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) { case MCDI_EVENT_LINKCHANGE_SPEED_100M: speed = 100; break; case MCDI_EVENT_LINKCHANGE_SPEED_1G: speed = 1000; break; case MCDI_EVENT_LINKCHANGE_SPEED_10G: speed = 10000; break; default: speed = 0; break; } link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS); siena_phy_decode_link_mode(enp, link_flags, speed, MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL), &link_mode, &fcntl); siena_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP), &lp_cap_mask); /* * It's safe to update ep_lp_cap_mask without the driver's port lock * because presumably any concurrently running efx_port_poll() is * only going to arrive at the same value. * * ep_fcntl has two meanings. It's either the link common fcntl * (if the PHY supports AN), or it's the forced link state. If * the former, it's safe to update the value for the same reason as * for ep_lp_cap_mask. If the latter, then just ignore the value, * because we can race with efx_mac_fcntl_set(). */ epp->ep_lp_cap_mask = lp_cap_mask; if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN)) epp->ep_fcntl = fcntl; *link_modep = link_mode; } __checkReturn efx_rc_t siena_phy_power( __in efx_nic_t *enp, __in boolean_t power) { efx_rc_t rc; if (!power) return (0); /* Check if the PHY is a zombie */ if ((rc = siena_phy_verify(enp)) != 0) goto fail1; enp->en_reset_flags |= EFX_RESET_PHY; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t siena_phy_get_link( __in efx_nic_t *enp, __out siena_link_state_t *slsp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN, MC_CMD_GET_LINK_OUT_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_LINK; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_LINK_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) { rc = EMSGSIZE; goto fail2; } siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP), &slsp->sls_adv_cap_mask); siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP), &slsp->sls_lp_cap_mask); siena_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS), MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED), MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL), &slsp->sls_link_mode, &slsp->sls_fcntl); #if EFSYS_OPT_LOOPBACK /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == 
EFX_LOOPBACK_XAUI); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS); EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD); slsp->sls_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE); #endif /* EFSYS_OPT_LOOPBACK */ slsp->sls_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t siena_phy_reconfigure( __in efx_nic_t *enp) { efx_port_t *epp = &(enp->en_port); efx_mcdi_req_t req; uint8_t payload[MAX(MAX(MC_CMD_SET_ID_LED_IN_LEN, MC_CMD_SET_ID_LED_OUT_LEN), MAX(MC_CMD_SET_LINK_IN_LEN, MC_CMD_SET_LINK_OUT_LEN))]; uint32_t cap_mask; +#if EFSYS_OPT_PHY_LED_CONTROL unsigned int led_mode; +#endif unsigned int speed; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_LINK; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_LINK_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN; cap_mask = epp->ep_adv_cap_mask; MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP, PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1, PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1, PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1, PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1, PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1, PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1, PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1, PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1, PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1, PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1); #if EFSYS_OPT_LOOPBACK MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, epp->ep_loopback_type); switch (epp->ep_loopback_link_mode) { case EFX_LINK_100FDX: speed = 100; break; case EFX_LINK_1000FDX: speed = 1000; break; case EFX_LINK_10000FDX: speed = 10000; break; default: speed = 0; } #else MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE); speed = 0; #endif /* EFSYS_OPT_LOOPBACK */ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed); #if EFSYS_OPT_PHY_FLAGS MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags); #else MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0); #endif /* EFSYS_OPT_PHY_FLAGS */ efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } /* And set the blink mode */ (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_SET_ID_LED; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN; #if EFSYS_OPT_PHY_LED_CONTROL switch (epp->ep_phy_led_mode) { case EFX_PHY_LED_DEFAULT: led_mode = MC_CMD_LED_DEFAULT; break; case EFX_PHY_LED_OFF: led_mode = MC_CMD_LED_OFF; break; case EFX_PHY_LED_ON: led_mode = MC_CMD_LED_ON; break; default: EFSYS_ASSERT(0); led_mode = 
MC_CMD_LED_DEFAULT; } MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode); #else MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT); #endif /* EFSYS_OPT_PHY_LED_CONTROL */ efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t siena_phy_verify( __in efx_nic_t *enp) { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN, MC_CMD_GET_PHY_STATE_OUT_LEN)]; uint32_t state; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PHY_STATE; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) { rc = EMSGSIZE; goto fail2; } state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE); if (state != MC_CMD_PHY_STATE_OK) { if (state != MC_CMD_PHY_STATE_ZOMBIE) EFSYS_PROBE1(mc_pcol_error, int, state); rc = ENOTACTIVE; goto fail3; } return (0); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t siena_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip) { _NOTE(ARGUNUSED(enp, ouip)) return (ENOTSUP); } #if EFSYS_OPT_PHY_STATS #define SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \ _mc_record, _efx_record) \ if ((_vmask) & (1ULL << (_mc_record))) { \ (_smask) |= (1ULL << (_efx_record)); \ if ((_stat) != NULL && !EFSYS_MEM_IS_NULL(_esmp)) { \ efx_dword_t dword; \ EFSYS_MEM_READD(_esmp, (_mc_record) * 4, &dword);\ (_stat)[_efx_record] = \ EFX_DWORD_FIELD(dword, EFX_DWORD_0); \ } \ } #define SIENA_SIMPLE_STAT_SET2(_vmask, _esmp, _smask, _stat, _record) \ SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \ MC_CMD_ ## _record, \ EFX_PHY_STAT_ ## _record) void siena_phy_decode_stats( __in efx_nic_t *enp, __in uint32_t vmask, __in_opt efsys_mem_t *esmp, __out_opt uint64_t *smaskp, __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat) { uint64_t smask = 0; _NOTE(ARGUNUSED(enp)) SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, OUI); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_LINK_UP); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_RX_FAULT); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_TX_FAULT); if (vmask & (1 << MC_CMD_PMA_PMD_SIGNAL)) { smask |= ((1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_A) | (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_B) | (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_C) | (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_D)); if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) { efx_dword_t dword; uint32_t sig; EFSYS_MEM_READD(esmp, 4 * MC_CMD_PMA_PMD_SIGNAL, &dword); sig = EFX_DWORD_FIELD(dword, EFX_DWORD_0); stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_A] = (sig >> 1) & 1; stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_B] = (sig >> 2) & 1; stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_C] = (sig >> 3) & 1; stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_D] = (sig >> 4) & 1; } } SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_A, EFX_PHY_STAT_SNR_A); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_B, EFX_PHY_STAT_SNR_B); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_C, EFX_PHY_STAT_SNR_C); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_D, EFX_PHY_STAT_SNR_D); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_LINK_UP); 
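The SIENA_SIMPLE_STAT_SET macros above all assume the same DMA layout: the MC writes one 32-bit value per statistic, so record N sits at byte offset N * 4 in the shared buffer, and composite records such as PMA_PMD_SIGNAL pack one lane per bit. Both accesses sketched over a plain byte buffer:

#include <stdint.h>
#include <string.h>

/* Fetch MC statistic record 'rec' (one dword each; offset = rec * 4). */
static uint32_t
stat_dword(const uint8_t *dma_buf, unsigned int rec)
{
	uint32_t v;

	memcpy(&v, dma_buf + (size_t)rec * 4, sizeof (v));
	return (v);
}

/* Unpack per-lane signal bits, as done for PMA_PMD_SIGNAL above. */
static void
unpack_lanes(uint32_t sig, uint32_t lanes[4])
{
	lanes[0] = (sig >> 1) & 1;	/* lane A */
	lanes[1] = (sig >> 2) & 1;	/* lane B */
	lanes[2] = (sig >> 3) & 1;	/* lane C */
	lanes[3] = (sig >> 4) & 1;	/* lane D */
}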
SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_RX_FAULT); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_TX_FAULT); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BER); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BLOCK_ERRORS); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_LINK_UP, EFX_PHY_STAT_PHY_XS_LINK_UP); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_RX_FAULT, EFX_PHY_STAT_PHY_XS_RX_FAULT); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_TX_FAULT, EFX_PHY_STAT_PHY_XS_TX_FAULT); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_ALIGN, EFX_PHY_STAT_PHY_XS_ALIGN); if (vmask & (1 << MC_CMD_PHYXS_SYNC)) { smask |= ((1 << EFX_PHY_STAT_PHY_XS_SYNC_A) | (1 << EFX_PHY_STAT_PHY_XS_SYNC_B) | (1 << EFX_PHY_STAT_PHY_XS_SYNC_C) | (1 << EFX_PHY_STAT_PHY_XS_SYNC_D)); if (stat != NULL && !EFSYS_MEM_IS_NULL(esmp)) { efx_dword_t dword; uint32_t sync; EFSYS_MEM_READD(esmp, 4 * MC_CMD_PHYXS_SYNC, &dword); sync = EFX_DWORD_FIELD(dword, EFX_DWORD_0); stat[EFX_PHY_STAT_PHY_XS_SYNC_A] = (sync >> 0) & 1; stat[EFX_PHY_STAT_PHY_XS_SYNC_B] = (sync >> 1) & 1; stat[EFX_PHY_STAT_PHY_XS_SYNC_C] = (sync >> 2) & 1; stat[EFX_PHY_STAT_PHY_XS_SYNC_D] = (sync >> 3) & 1; } } SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_LINK_UP); SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_COMPLETE); SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_CL22_LINK_UP, EFX_PHY_STAT_CL22EXT_LINK_UP); if (smaskp != NULL) *smaskp = smask; } __checkReturn efx_rc_t siena_phy_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t vmask = encp->enc_mcdi_phy_stat_mask; uint64_t smask; efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_PHY_STATS_IN_LEN, MC_CMD_PHY_STATS_OUT_DMA_LEN)]; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_PHY_STATS; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_PHY_STATS_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MC_CMD_PHY_STATS_OUT_DMA_LEN; MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_LO, EFSYS_MEM_ADDR(esmp) & 0xffffffff); MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_HI, EFSYS_MEM_ADDR(esmp) >> 32); efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN); siena_phy_decode_stats(enp, vmask, esmp, &smask, stat); EFSYS_ASSERT(smask == encp->enc_phy_stat_mask); return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (0); } #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_BIST __checkReturn efx_rc_t siena_phy_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type) { efx_rc_t rc; if ((rc = efx_mcdi_bist_start(enp, type)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static __checkReturn unsigned long siena_phy_sft9001_bist_status( __in uint16_t code) { switch (code) { case MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY: return (EFX_PHY_CABLE_STATUS_BUSY); case MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT: return (EFX_PHY_CABLE_STATUS_INTERPAIRSHORT); case MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT: return (EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT); case MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN: return (EFX_PHY_CABLE_STATUS_OPEN); case MC_CMD_POLL_BIST_SFT9001_PAIR_OK: return (EFX_PHY_CABLE_STATUS_OK); default: return (EFX_PHY_CABLE_STATUS_INVALID); } } __checkReturn efx_rc_t siena_phy_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t 
*resultp, __out_opt __drv_when(count > 0, __notnull) uint32_t *value_maskp, __out_ecount_opt(count) __drv_when(count > 0, __notnull) unsigned long *valuesp, __in size_t count) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN, MCDI_CTL_SDU_LEN_MAX)]; uint32_t value_mask = 0; efx_mcdi_req_t req; uint32_t result; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_POLL_BIST; req.emr_in_buf = payload; req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN; req.emr_out_buf = payload; req.emr_out_length = MCDI_CTL_SDU_LEN_MAX; efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; goto fail1; } if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) { rc = EMSGSIZE; goto fail2; } if (count > 0) (void) memset(valuesp, '\0', count * sizeof (unsigned long)); result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT); /* Extract PHY specific results */ if (result == MC_CMD_POLL_BIST_PASSED && encp->enc_phy_type == EFX_PHY_SFT9001B && req.emr_out_length_used >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN && (type == EFX_BIST_TYPE_PHY_CABLE_SHORT || type == EFX_BIST_TYPE_PHY_CABLE_LONG)) { uint16_t word; if (count > EFX_BIST_PHY_CABLE_LENGTH_A) { if (valuesp != NULL) valuesp[EFX_BIST_PHY_CABLE_LENGTH_A] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A); value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_A); } if (count > EFX_BIST_PHY_CABLE_LENGTH_B) { if (valuesp != NULL) valuesp[EFX_BIST_PHY_CABLE_LENGTH_B] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B); value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_B); } if (count > EFX_BIST_PHY_CABLE_LENGTH_C) { if (valuesp != NULL) valuesp[EFX_BIST_PHY_CABLE_LENGTH_C] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C); value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_C); } if (count > EFX_BIST_PHY_CABLE_LENGTH_D) { if (valuesp != NULL) valuesp[EFX_BIST_PHY_CABLE_LENGTH_D] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D); value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_D); } if (count > EFX_BIST_PHY_CABLE_STATUS_A) { if (valuesp != NULL) { word = MCDI_OUT_WORD(req, POLL_BIST_OUT_SFT9001_CABLE_STATUS_A); valuesp[EFX_BIST_PHY_CABLE_STATUS_A] = siena_phy_sft9001_bist_status(word); } value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_A); } if (count > EFX_BIST_PHY_CABLE_STATUS_B) { if (valuesp != NULL) { word = MCDI_OUT_WORD(req, POLL_BIST_OUT_SFT9001_CABLE_STATUS_B); valuesp[EFX_BIST_PHY_CABLE_STATUS_B] = siena_phy_sft9001_bist_status(word); } value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_B); } if (count > EFX_BIST_PHY_CABLE_STATUS_C) { if (valuesp != NULL) { word = MCDI_OUT_WORD(req, POLL_BIST_OUT_SFT9001_CABLE_STATUS_C); valuesp[EFX_BIST_PHY_CABLE_STATUS_C] = siena_phy_sft9001_bist_status(word); } value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_C); } if (count > EFX_BIST_PHY_CABLE_STATUS_D) { if (valuesp != NULL) { word = MCDI_OUT_WORD(req, POLL_BIST_OUT_SFT9001_CABLE_STATUS_D); valuesp[EFX_BIST_PHY_CABLE_STATUS_D] = siena_phy_sft9001_bist_status(word); } value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_D); } } else if (result == MC_CMD_POLL_BIST_FAILED && encp->enc_phy_type == EFX_PHY_QLX111V && req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN && count > EFX_BIST_FAULT_CODE) { if (valuesp != NULL) valuesp[EFX_BIST_FAULT_CODE] = MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST); value_mask |= 1 << EFX_BIST_FAULT_CODE; } if (value_maskp != NULL) *value_maskp = value_mask; EFSYS_ASSERT(resultp != NULL); if (result == MC_CMD_POLL_BIST_RUNNING) *resultp = EFX_BIST_RESULT_RUNNING; else 
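	/*
	 * Collapse the MCDI result into the generic efx_bist_result_t
	 * values: anything other than RUNNING or PASSED reports as FAILED.
	 */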
if (result == MC_CMD_POLL_BIST_PASSED) *resultp = EFX_BIST_RESULT_PASSED; else *resultp = EFX_BIST_RESULT_FAILED; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void siena_phy_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type) { /* There is no way to stop BIST on Siena */ _NOTE(ARGUNUSED(enp, type)) } #endif /* EFSYS_OPT_BIST */ #endif /* EFSYS_OPT_SIENA */ Index: projects/clang700-import/sys/dev/sfxge/sfxge_rx.c =================================================================== --- projects/clang700-import/sys/dev/sfxge/sfxge_rx.c (revision 340917) +++ projects/clang700-import/sys/dev/sfxge/sfxge_rx.c (revision 340918) @@ -1,1426 +1,1426 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010-2016 Solarflare Communications Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #endif #include "common/efx.h" #include "sfxge.h" #include "sfxge_rx.h" #define RX_REFILL_THRESHOLD(_entries) (EFX_RXQ_LIMIT(_entries) * 9 / 10) #ifdef SFXGE_LRO SYSCTL_NODE(_hw_sfxge, OID_AUTO, lro, CTLFLAG_RD, NULL, "Large receive offload (LRO) parameters"); #define SFXGE_LRO_PARAM(_param) SFXGE_PARAM(lro._param) /* Size of the LRO hash table. Must be a power of 2. A larger table * means we can accelerate a larger number of streams. */ static unsigned lro_table_size = 128; TUNABLE_INT(SFXGE_LRO_PARAM(table_size), &lro_table_size); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, table_size, CTLFLAG_RDTUN, &lro_table_size, 0, "Size of the LRO hash table (must be a power of 2)"); /* Maximum length of a hash chain. If chains get too long then the lookup * time increases and may exceed the benefit of LRO. 
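 * With the defaults (table_size 128, chain_max 20) at most 2560
 * connections can be tracked at once; enlarging the table via the
 * SFXGE_LRO_PARAM(table_size) tunable is usually a better remedy for
 * long chains than raising chain_max, since lookup cost grows with
 * chain length.  A minimal sketch of the bucket walk this bounds is
 * below; it uses only fields referenced elsewhere in this file, the
 * helper name is hypothetical, and the block is not compiled.
 */
#if 0	/* illustrative sketch only */
static struct sfxge_lro_conn *
sfxge_lro_lookup_sketch(struct sfxge_lro_state *st, uint32_t conn_hash)
{
	unsigned bucket = conn_hash & st->conns_mask; /* power-of-2 mask */
	struct sfxge_lro_conn *c;

	TAILQ_FOREACH(c, &st->conns[bucket], link)
		if (c->conn_hash == conn_hash)
			return (c);
	return (NULL);
}
#endif
/*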
*/ static unsigned lro_chain_max = 20; TUNABLE_INT(SFXGE_LRO_PARAM(chain_max), &lro_chain_max); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, chain_max, CTLFLAG_RDTUN, &lro_chain_max, 0, "The maximum length of a hash chain"); /* Maximum time (in ticks) that a connection can be idle before its LRO * state is discarded. */ static unsigned lro_idle_ticks; /* initialised in sfxge_rx_init() */ TUNABLE_INT(SFXGE_LRO_PARAM(idle_ticks), &lro_idle_ticks); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, idle_ticks, CTLFLAG_RDTUN, &lro_idle_ticks, 0, "The maximum time (in ticks) that a connection can be idle " "before its LRO state is discarded"); /* Number of packets with payload that must arrive in-order before a * connection is eligible for LRO. The idea is we should avoid coalescing * segments when the sender is in slow-start because reducing the ACK rate * can damage performance. */ static int lro_slow_start_packets = 2000; TUNABLE_INT(SFXGE_LRO_PARAM(slow_start_packets), &lro_slow_start_packets); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, slow_start_packets, CTLFLAG_RDTUN, &lro_slow_start_packets, 0, "Number of packets with payload that must arrive in-order before " "a connection is eligible for LRO"); /* Number of packets with payload that must arrive in-order following loss * before a connection is eligible for LRO. The idea is we should avoid * coalescing segments when the sender is recovering from loss, because * reducing the ACK rate can damage performance. */ static int lro_loss_packets = 20; TUNABLE_INT(SFXGE_LRO_PARAM(loss_packets), &lro_loss_packets); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, loss_packets, CTLFLAG_RDTUN, &lro_loss_packets, 0, "Number of packets with payload that must arrive in-order " "following loss before a connection is eligible for LRO"); /* Flags for sfxge_lro_conn::l2_id; must not collide with EVL_VLID_MASK */ #define SFXGE_LRO_L2_ID_VLAN 0x4000 #define SFXGE_LRO_L2_ID_IPV6 0x8000 #define SFXGE_LRO_CONN_IS_VLAN_ENCAP(c) ((c)->l2_id & SFXGE_LRO_L2_ID_VLAN) #define SFXGE_LRO_CONN_IS_TCPIPV4(c) (!((c)->l2_id & SFXGE_LRO_L2_ID_IPV6)) /* Compare IPv6 addresses, avoiding conditional branches */ static unsigned long ipv6_addr_cmp(const struct in6_addr *left, const struct in6_addr *right) { #if LONG_BIT == 64 const uint64_t *left64 = (const uint64_t *)left; const uint64_t *right64 = (const uint64_t *)right; return (left64[0] - right64[0]) | (left64[1] - right64[1]); #else return (left->s6_addr32[0] - right->s6_addr32[0]) | (left->s6_addr32[1] - right->s6_addr32[1]) | (left->s6_addr32[2] - right->s6_addr32[2]) | (left->s6_addr32[3] - right->s6_addr32[3]); #endif } #endif /* SFXGE_LRO */ void sfxge_rx_qflush_done(struct sfxge_rxq *rxq) { rxq->flush_state = SFXGE_FLUSH_DONE; } void sfxge_rx_qflush_failed(struct sfxge_rxq *rxq) { rxq->flush_state = SFXGE_FLUSH_FAILED; } #ifdef RSS static uint8_t toep_key[RSS_KEYSIZE]; #else static uint8_t toep_key[] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; #endif static void sfxge_rx_post_refill(void *arg) { struct sfxge_rxq *rxq = arg; struct sfxge_softc *sc; unsigned int index; struct sfxge_evq *evq; uint16_t magic; sc = rxq->sc; index = rxq->index; evq = sc->evq[index]; magic = sfxge_sw_ev_rxq_magic(SFXGE_SW_EV_RX_QREFILL, rxq); /* This is guaranteed due to the start/stop order of rx and ev */ KASSERT(evq->init_state == SFXGE_EVQ_STARTED, ("evq not started"));
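	/*
	 * Rather than refilling directly from callout context, post a
	 * software event tagged with the queue's magic number; the refill
	 * then runs from the event-queue handler that owns this RX queue.
	 */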
KASSERT(rxq->init_state == SFXGE_RXQ_STARTED, ("rxq not started")); efx_ev_qpost(evq->common, magic); } static void sfxge_rx_schedule_refill(struct sfxge_rxq *rxq, boolean_t retrying) { /* Initially retry after 100 ms, but back off in case of * repeated failures as we probably have to wait for the * administrator to raise the pool limit. */ if (retrying) rxq->refill_delay = min(rxq->refill_delay * 2, 10 * hz); else rxq->refill_delay = hz / 10; callout_reset_curcpu(&rxq->refill_callout, rxq->refill_delay, sfxge_rx_post_refill, rxq); } #define SFXGE_REFILL_BATCH 64 static void sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying) { struct sfxge_softc *sc; unsigned int index; struct sfxge_evq *evq; unsigned int batch; unsigned int rxfill; unsigned int mblksize; int ntodo; efsys_dma_addr_t addr[SFXGE_REFILL_BATCH]; sc = rxq->sc; index = rxq->index; evq = sc->evq[index]; prefetch_read_many(sc->enp); prefetch_read_many(rxq->common); SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED)) return; rxfill = rxq->added - rxq->completed; KASSERT(rxfill <= EFX_RXQ_LIMIT(rxq->entries), ("rxfill > EFX_RXQ_LIMIT(rxq->entries)")); ntodo = min(EFX_RXQ_LIMIT(rxq->entries) - rxfill, target); KASSERT(ntodo <= EFX_RXQ_LIMIT(rxq->entries), ("ntodo > EFX_RQX_LIMIT(rxq->entries)")); if (ntodo == 0) return; batch = 0; mblksize = sc->rx_buffer_size - sc->rx_buffer_align; while (ntodo-- > 0) { unsigned int id; struct sfxge_rx_sw_desc *rx_desc; bus_dma_segment_t seg; struct mbuf *m; id = (rxq->added + batch) & rxq->ptr_mask; rx_desc = &rxq->queue[id]; KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL")); rx_desc->flags = EFX_DISCARD; m = rx_desc->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_cluster_size); if (m == NULL) break; /* m_len specifies length of area to be mapped for DMA */ m->m_len = mblksize; m->m_data = (caddr_t)P2ROUNDUP((uintptr_t)m->m_data, CACHE_LINE_SIZE); m->m_data += sc->rx_buffer_align; sfxge_map_mbuf_fast(rxq->mem.esm_tag, rxq->mem.esm_map, m, &seg); addr[batch++] = seg.ds_addr; if (batch == SFXGE_REFILL_BATCH) { efx_rx_qpost(rxq->common, addr, mblksize, batch, rxq->completed, rxq->added); rxq->added += batch; batch = 0; } } if (ntodo != 0) sfxge_rx_schedule_refill(rxq, retrying); if (batch != 0) { efx_rx_qpost(rxq->common, addr, mblksize, batch, rxq->completed, rxq->added); rxq->added += batch; } /* Make the descriptors visible to the hardware */ bus_dmamap_sync(rxq->mem.esm_tag, rxq->mem.esm_map, BUS_DMASYNC_PREWRITE); efx_rx_qpush(rxq->common, rxq->added, &rxq->pushed); /* The queue could still be empty if no descriptors were actually * pushed, in which case there will be no event to cause the next * refill, so we must schedule a refill ourselves. */ if(rxq->pushed == rxq->completed) { sfxge_rx_schedule_refill(rxq, retrying); } } void sfxge_rx_qrefill(struct sfxge_rxq *rxq) { if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED)) return; /* Make sure the queue is full */ sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_TRUE); } static void __sfxge_rx_deliver(struct sfxge_softc *sc, struct mbuf *m) { struct ifnet *ifp = sc->ifnet; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.csum_data = 0xffff; ifp->if_input(ifp, m); } static void sfxge_rx_deliver(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_desc) { struct sfxge_softc *sc = rxq->sc; struct mbuf *m = rx_desc->mbuf; int flags = rx_desc->flags; int csum_flags; /* Convert checksum flags */ csum_flags = (flags & EFX_CKSUM_IPV4) ? 
(CSUM_IP_CHECKED | CSUM_IP_VALID) : 0; if (flags & EFX_CKSUM_TCPUDP) csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) { m->m_pkthdr.flowid = efx_pseudo_hdr_hash_get(rxq->common, EFX_RX_HASHALG_TOEPLITZ, mtod(m, uint8_t *)); /* The hash covers a 4-tuple for TCP only */ M_HASHTYPE_SET(m, (flags & EFX_PKT_IPV4) ? ((flags & EFX_PKT_TCP) ? M_HASHTYPE_RSS_TCP_IPV4 : M_HASHTYPE_RSS_IPV4) : ((flags & EFX_PKT_TCP) ? M_HASHTYPE_RSS_TCP_IPV6 : M_HASHTYPE_RSS_IPV6)); } m->m_data += sc->rx_prefix_size; m->m_len = rx_desc->size - sc->rx_prefix_size; m->m_pkthdr.len = m->m_len; m->m_pkthdr.csum_flags = csum_flags; __sfxge_rx_deliver(sc, rx_desc->mbuf); rx_desc->flags = EFX_DISCARD; rx_desc->mbuf = NULL; } #ifdef SFXGE_LRO static void sfxge_lro_deliver(struct sfxge_lro_state *st, struct sfxge_lro_conn *c) { struct sfxge_softc *sc = st->sc; struct mbuf *m = c->mbuf; struct tcphdr *c_th; int csum_flags; KASSERT(m, ("no mbuf to deliver")); ++st->n_bursts; /* Finish off packet munging and recalculate IP header checksum. */ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *iph = c->nh; iph->ip_len = htons(iph->ip_len); iph->ip_sum = 0; iph->ip_sum = in_cksum_hdr(iph); c_th = (struct tcphdr *)(iph + 1); csum_flags = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID); } else { struct ip6_hdr *iph = c->nh; iph->ip6_plen = htons(iph->ip6_plen); c_th = (struct tcphdr *)(iph + 1); csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR; } c_th->th_win = c->th_last->th_win; c_th->th_ack = c->th_last->th_ack; if (c_th->th_off == c->th_last->th_off) { /* Copy TCP options (take care to avoid going negative). */ int optlen = ((c_th->th_off - 5) & 0xf) << 2u; memcpy(c_th + 1, c->th_last + 1, optlen); } m->m_pkthdr.flowid = c->conn_hash; M_HASHTYPE_SET(m, SFXGE_LRO_CONN_IS_TCPIPV4(c) ? M_HASHTYPE_RSS_TCP_IPV4 : M_HASHTYPE_RSS_TCP_IPV6); m->m_pkthdr.csum_flags = csum_flags; __sfxge_rx_deliver(sc, m); c->mbuf = NULL; c->delivered = 1; } /* Drop the given connection, and add it to the free list. */ static void sfxge_lro_drop(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c) { unsigned bucket; KASSERT(!c->mbuf, ("found orphaned mbuf")); if (c->next_buf.mbuf != NULL) { sfxge_rx_deliver(rxq, &c->next_buf); LIST_REMOVE(c, active_link); } bucket = c->conn_hash & rxq->lro.conns_mask; KASSERT(rxq->lro.conns_n[bucket] > 0, ("LRO: bucket fill level wrong")); --rxq->lro.conns_n[bucket]; TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link); TAILQ_INSERT_HEAD(&rxq->lro.free_conns, c, link); } /* Stop tracking connections that have gone idle in order to keep hash * chains short. */ static void sfxge_lro_purge_idle(struct sfxge_rxq *rxq, unsigned now) { struct sfxge_lro_conn *c; unsigned i; KASSERT(LIST_EMPTY(&rxq->lro.active_conns), ("found active connections")); rxq->lro.last_purge_ticks = now; for (i = 0; i <= rxq->lro.conns_mask; ++i) { if (TAILQ_EMPTY(&rxq->lro.conns[i])) continue; c = TAILQ_LAST(&rxq->lro.conns[i], sfxge_lro_tailq); if (now - c->last_pkt_ticks > lro_idle_ticks) { ++rxq->lro.n_drop_idle; sfxge_lro_drop(rxq, c); } } } static void sfxge_lro_merge(struct sfxge_lro_state *st, struct sfxge_lro_conn *c, struct mbuf *mbuf, struct tcphdr *th) { struct tcphdr *c_th; /* Tack the new mbuf onto the chain. 
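 * Only the head mbuf's packet-header length is kept up to date: it and
 * the (host-order) IP length grow as each segment is appended, and
 * network byte order is restored in sfxge_lro_deliver().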
*/ KASSERT(!mbuf->m_next, ("mbuf already chained")); c->mbuf_tail->m_next = mbuf; c->mbuf_tail = mbuf; /* Increase length appropriately */ c->mbuf->m_pkthdr.len += mbuf->m_len; /* Update the connection state flags */ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *iph = c->nh; iph->ip_len += mbuf->m_len; c_th = (struct tcphdr *)(iph + 1); } else { struct ip6_hdr *iph = c->nh; iph->ip6_plen += mbuf->m_len; c_th = (struct tcphdr *)(iph + 1); } c_th->th_flags |= (th->th_flags & TH_PUSH); c->th_last = th; ++st->n_merges; /* Pass packet up now if another segment could overflow the IP * length. */ if (c->mbuf->m_pkthdr.len > 65536 - 9200) sfxge_lro_deliver(st, c); } static void sfxge_lro_start(struct sfxge_lro_state *st, struct sfxge_lro_conn *c, struct mbuf *mbuf, void *nh, struct tcphdr *th) { /* Start the chain */ c->mbuf = mbuf; c->mbuf_tail = c->mbuf; c->nh = nh; c->th_last = th; mbuf->m_pkthdr.len = mbuf->m_len; /* Mangle header fields for later processing */ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *iph = nh; iph->ip_len = ntohs(iph->ip_len); } else { struct ip6_hdr *iph = nh; iph->ip6_plen = ntohs(iph->ip6_plen); } } /* Try to merge or otherwise hold or deliver (as appropriate) the * packet buffered for this connection (c->next_buf). Return a flag * indicating whether the connection is still active for LRO purposes. */ static int sfxge_lro_try_merge(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c) { struct sfxge_rx_sw_desc *rx_buf = &c->next_buf; char *eh = c->next_eh; int data_length, hdr_length, dont_merge; unsigned th_seq, pkt_length; struct tcphdr *th; unsigned now; if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *iph = c->next_nh; th = (struct tcphdr *)(iph + 1); pkt_length = ntohs(iph->ip_len) + (char *) iph - eh; } else { struct ip6_hdr *iph = c->next_nh; th = (struct tcphdr *)(iph + 1); pkt_length = ntohs(iph->ip6_plen) + (char *) th - eh; } hdr_length = (char *) th + th->th_off * 4 - eh; data_length = (min(pkt_length, rx_buf->size - rxq->sc->rx_prefix_size) - hdr_length); th_seq = ntohl(th->th_seq); dont_merge = ((data_length <= 0) | (th->th_flags & (TH_URG | TH_SYN | TH_RST | TH_FIN))); /* Check for options other than aligned timestamp. */ if (th->th_off != 5) { const uint32_t *opt_ptr = (const uint32_t *) (th + 1); if (th->th_off == 8 && opt_ptr[0] == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { /* timestamp option -- okay */ } else { dont_merge = 1; } } if (__predict_false(th_seq != c->next_seq)) { /* Out-of-order, so start counting again. */ if (c->mbuf != NULL) sfxge_lro_deliver(&rxq->lro, c); c->n_in_order_pkts -= lro_loss_packets; c->next_seq = th_seq + data_length; ++rxq->lro.n_misorder; goto deliver_buf_out; } c->next_seq = th_seq + data_length; now = ticks; if (now - c->last_pkt_ticks > lro_idle_ticks) { ++rxq->lro.n_drop_idle; if (c->mbuf != NULL) sfxge_lro_deliver(&rxq->lro, c); sfxge_lro_drop(rxq, c); return (0); } c->last_pkt_ticks = ticks; if (c->n_in_order_pkts < lro_slow_start_packets) { /* May be in slow-start, so don't merge. 
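 * Coalescing would reduce the ACK rate while the sender's congestion
 * window is still growing, so count the segment and deliver it
 * unmerged until lro_slow_start_packets in-order segments have been
 * seen.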
*/ ++rxq->lro.n_slow_start; ++c->n_in_order_pkts; goto deliver_buf_out; } if (__predict_false(dont_merge)) { if (c->mbuf != NULL) sfxge_lro_deliver(&rxq->lro, c); if (th->th_flags & (TH_FIN | TH_RST)) { ++rxq->lro.n_drop_closed; sfxge_lro_drop(rxq, c); return (0); } goto deliver_buf_out; } rx_buf->mbuf->m_data += rxq->sc->rx_prefix_size; if (__predict_true(c->mbuf != NULL)) { /* Remove headers and any padding */ rx_buf->mbuf->m_data += hdr_length; rx_buf->mbuf->m_len = data_length; sfxge_lro_merge(&rxq->lro, c, rx_buf->mbuf, th); } else { /* Remove any padding */ rx_buf->mbuf->m_len = pkt_length; sfxge_lro_start(&rxq->lro, c, rx_buf->mbuf, c->next_nh, th); } rx_buf->mbuf = NULL; return (1); deliver_buf_out: sfxge_rx_deliver(rxq, rx_buf); return (1); } static void sfxge_lro_new_conn(struct sfxge_lro_state *st, uint32_t conn_hash, uint16_t l2_id, void *nh, struct tcphdr *th) { unsigned bucket = conn_hash & st->conns_mask; struct sfxge_lro_conn *c; if (st->conns_n[bucket] >= lro_chain_max) { ++st->n_too_many; return; } if (!TAILQ_EMPTY(&st->free_conns)) { c = TAILQ_FIRST(&st->free_conns); TAILQ_REMOVE(&st->free_conns, c, link); } else { c = malloc(sizeof(*c), M_SFXGE, M_NOWAIT); if (c == NULL) return; c->mbuf = NULL; c->next_buf.mbuf = NULL; } /* Create the connection tracking data */ ++st->conns_n[bucket]; TAILQ_INSERT_HEAD(&st->conns[bucket], c, link); c->l2_id = l2_id; c->conn_hash = conn_hash; c->source = th->th_sport; c->dest = th->th_dport; c->n_in_order_pkts = 0; c->last_pkt_ticks = *(volatile int *)&ticks; c->delivered = 0; ++st->n_new_stream; /* NB. We don't initialise c->next_seq, and it doesn't matter what * value it has. Most likely the next packet received for this * connection will not match -- no harm done. */ } /* Process mbuf and decide whether to dispatch it to the stack now or * later. */ static void sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf) { struct sfxge_softc *sc = rxq->sc; struct mbuf *m = rx_buf->mbuf; struct ether_header *eh; struct sfxge_lro_conn *c; uint16_t l2_id; uint16_t l3_proto; void *nh; struct tcphdr *th; uint32_t conn_hash; unsigned bucket; /* Get the hardware hash */ conn_hash = efx_pseudo_hdr_hash_get(rxq->common, EFX_RX_HASHALG_TOEPLITZ, mtod(m, uint8_t *)); eh = (struct ether_header *)(m->m_data + sc->rx_prefix_size); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { struct ether_vlan_header *veh = (struct ether_vlan_header *)eh; l2_id = EVL_VLANOFTAG(ntohs(veh->evl_tag)) | SFXGE_LRO_L2_ID_VLAN; l3_proto = veh->evl_proto; nh = veh + 1; } else { l2_id = 0; l3_proto = eh->ether_type; nh = eh + 1; } /* Check whether this is a suitable packet (unfragmented * TCP/IPv4 or TCP/IPv6). If so, find the TCP header and * length, and compute a hash if necessary. If not, return. 
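 * The l2_id value packs the VLAN ID with the SFXGE_LRO_L2_ID_VLAN and
 * SFXGE_LRO_L2_ID_IPV6 flag bits, so a single integer comparison in
 * the lookup below rejects candidates that differ in VLAN tag or
 * address family.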
*/ if (l3_proto == htons(ETHERTYPE_IP)) { struct ip *iph = nh; KASSERT(iph->ip_p == IPPROTO_TCP, ("IPv4 protocol is not TCP, but packet marker is set")); if ((iph->ip_hl - (sizeof(*iph) >> 2u)) | (iph->ip_off & htons(IP_MF | IP_OFFMASK))) goto deliver_now; th = (struct tcphdr *)(iph + 1); } else if (l3_proto == htons(ETHERTYPE_IPV6)) { struct ip6_hdr *iph = nh; KASSERT(iph->ip6_nxt == IPPROTO_TCP, ("IPv6 next header is not TCP, but packet marker is set")); l2_id |= SFXGE_LRO_L2_ID_IPV6; th = (struct tcphdr *)(iph + 1); } else { goto deliver_now; } bucket = conn_hash & rxq->lro.conns_mask; TAILQ_FOREACH(c, &rxq->lro.conns[bucket], link) { if ((c->l2_id - l2_id) | (c->conn_hash - conn_hash)) continue; if ((c->source - th->th_sport) | (c->dest - th->th_dport)) continue; if (c->mbuf != NULL) { if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *c_iph, *iph = nh; c_iph = c->nh; if ((c_iph->ip_src.s_addr - iph->ip_src.s_addr) | (c_iph->ip_dst.s_addr - iph->ip_dst.s_addr)) continue; } else { struct ip6_hdr *c_iph, *iph = nh; c_iph = c->nh; if (ipv6_addr_cmp(&c_iph->ip6_src, &iph->ip6_src) | ipv6_addr_cmp(&c_iph->ip6_dst, &iph->ip6_dst)) continue; } } /* Re-insert at head of list to reduce lookup time. */ TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link); TAILQ_INSERT_HEAD(&rxq->lro.conns[bucket], c, link); if (c->next_buf.mbuf != NULL) { if (!sfxge_lro_try_merge(rxq, c)) goto deliver_now; } else { LIST_INSERT_HEAD(&rxq->lro.active_conns, c, active_link); } c->next_buf = *rx_buf; c->next_eh = eh; c->next_nh = nh; rx_buf->mbuf = NULL; rx_buf->flags = EFX_DISCARD; return; } sfxge_lro_new_conn(&rxq->lro, conn_hash, l2_id, nh, th); deliver_now: sfxge_rx_deliver(rxq, rx_buf); } static void sfxge_lro_end_of_burst(struct sfxge_rxq *rxq) { struct sfxge_lro_state *st = &rxq->lro; struct sfxge_lro_conn *c; unsigned t; while (!LIST_EMPTY(&st->active_conns)) { c = LIST_FIRST(&st->active_conns); if (!c->delivered && c->mbuf != NULL) sfxge_lro_deliver(st, c); if (sfxge_lro_try_merge(rxq, c)) { if (c->mbuf != NULL) sfxge_lro_deliver(st, c); LIST_REMOVE(c, active_link); } c->delivered = 0; } t = *(volatile int *)&ticks; if (__predict_false(t != st->last_purge_ticks)) sfxge_lro_purge_idle(rxq, t); } #else /* !SFXGE_LRO */ static void sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf) { } static void sfxge_lro_end_of_burst(struct sfxge_rxq *rxq) { } #endif /* SFXGE_LRO */ void sfxge_rx_qcomplete(struct sfxge_rxq *rxq, boolean_t eop) { struct sfxge_softc *sc = rxq->sc; int if_capenable = sc->ifnet->if_capenable; int lro_enabled = if_capenable & IFCAP_LRO; unsigned int index; struct sfxge_evq *evq; unsigned int completed; unsigned int level; struct mbuf *m; struct sfxge_rx_sw_desc *prev = NULL; index = rxq->index; evq = sc->evq[index]; SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); completed = rxq->completed; while (completed != rxq->pending) { unsigned int id; struct sfxge_rx_sw_desc *rx_desc; id = completed++ & rxq->ptr_mask; rx_desc = &rxq->queue[id]; m = rx_desc->mbuf; if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED)) goto discard; if (rx_desc->flags & (EFX_ADDR_MISMATCH | EFX_DISCARD)) goto discard; /* Read the length from the pseudo header if required */ if (rx_desc->flags & EFX_PKT_PREFIX_LEN) { uint16_t tmp_size; int rc; rc = efx_pseudo_hdr_pkt_length_get(rxq->common, mtod(m, uint8_t *), &tmp_size); KASSERT(rc == 0, ("cannot get packet length: %d", rc)); rx_desc->size = (int)tmp_size + sc->rx_prefix_size; } prefetch_read_many(mtod(m, caddr_t)); switch (rx_desc->flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) { case 
EFX_PKT_IPV4: if (~if_capenable & IFCAP_RXCSUM) rx_desc->flags &= ~(EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP); break; case EFX_PKT_IPV6: if (~if_capenable & IFCAP_RXCSUM_IPV6) rx_desc->flags &= ~EFX_CKSUM_TCPUDP; break; case 0: /* Check for loopback packets */ { struct ether_header *etherhp; /*LINTED*/ etherhp = mtod(m, struct ether_header *); if (etherhp->ether_type == htons(SFXGE_ETHERTYPE_LOOPBACK)) { EFSYS_PROBE(loopback); rxq->loopback++; goto discard; } } break; default: KASSERT(B_FALSE, ("Rx descriptor with both IPv4 and IPv6 flags")); goto discard; } /* Pass packet up the stack or into LRO (pipelined) */ if (prev != NULL) { if (lro_enabled && ((prev->flags & (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)) == (EFX_PKT_TCP | EFX_CKSUM_TCPUDP))) sfxge_lro(rxq, prev); else sfxge_rx_deliver(rxq, prev); } prev = rx_desc; continue; discard: /* Return the packet to the pool */ m_free(m); rx_desc->mbuf = NULL; } rxq->completed = completed; level = rxq->added - rxq->completed; /* Pass last packet up the stack or into LRO */ if (prev != NULL) { if (lro_enabled && ((prev->flags & (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)) == (EFX_PKT_TCP | EFX_CKSUM_TCPUDP))) sfxge_lro(rxq, prev); else sfxge_rx_deliver(rxq, prev); } /* * If there are any pending flows and this is the end of the * poll then they must be completed. */ if (eop) sfxge_lro_end_of_burst(rxq); /* Top up the queue if necessary */ if (level < rxq->refill_threshold) sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_FALSE); } static void sfxge_rx_qstop(struct sfxge_softc *sc, unsigned int index) { struct sfxge_rxq *rxq; struct sfxge_evq *evq; unsigned int count; unsigned int retry = 3; SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); rxq = sc->rxq[index]; evq = sc->evq[index]; SFXGE_EVQ_LOCK(evq); KASSERT(rxq->init_state == SFXGE_RXQ_STARTED, ("rxq not started")); rxq->init_state = SFXGE_RXQ_INITIALIZED; callout_stop(&rxq->refill_callout); while (rxq->flush_state != SFXGE_FLUSH_DONE && retry != 0) { rxq->flush_state = SFXGE_FLUSH_PENDING; SFXGE_EVQ_UNLOCK(evq); /* Flush the receive queue */ if (efx_rx_qflush(rxq->common) != 0) { SFXGE_EVQ_LOCK(evq); rxq->flush_state = SFXGE_FLUSH_FAILED; break; } count = 0; do { /* Spin for 100 ms */ DELAY(100000); if (rxq->flush_state != SFXGE_FLUSH_PENDING) break; } while (++count < 20); SFXGE_EVQ_LOCK(evq); if (rxq->flush_state == SFXGE_FLUSH_PENDING) { /* Flush timeout - neither done nor failed */ log(LOG_ERR, "%s: Cannot flush Rx queue %u\n", device_get_nameunit(sc->dev), index); rxq->flush_state = SFXGE_FLUSH_DONE; } retry--; } if (rxq->flush_state == SFXGE_FLUSH_FAILED) { log(LOG_ERR, "%s: Flushing Rx queue %u failed\n", device_get_nameunit(sc->dev), index); rxq->flush_state = SFXGE_FLUSH_DONE; } rxq->pending = rxq->added; sfxge_rx_qcomplete(rxq, B_TRUE); KASSERT(rxq->completed == rxq->pending, ("rxq->completed != rxq->pending")); rxq->added = 0; rxq->pushed = 0; rxq->pending = 0; rxq->completed = 0; rxq->loopback = 0; /* Destroy the common code receive queue. 
*/ efx_rx_qdestroy(rxq->common); efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id, EFX_RXQ_NBUFS(sc->rxq_entries)); SFXGE_EVQ_UNLOCK(evq); } static int sfxge_rx_qstart(struct sfxge_softc *sc, unsigned int index) { struct sfxge_rxq *rxq; efsys_mem_t *esmp; struct sfxge_evq *evq; int rc; SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); rxq = sc->rxq[index]; esmp = &rxq->mem; evq = sc->evq[index]; KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED, ("rxq->init_state != SFXGE_RXQ_INITIALIZED")); KASSERT(evq->init_state == SFXGE_EVQ_STARTED, ("evq->init_state != SFXGE_EVQ_STARTED")); /* Program the buffer table. */ if ((rc = efx_sram_buf_tbl_set(sc->enp, rxq->buf_base_id, esmp, EFX_RXQ_NBUFS(sc->rxq_entries))) != 0) return (rc); /* Create the common code receive queue. */ if ((rc = efx_rx_qcreate(sc->enp, index, 0, EFX_RXQ_TYPE_DEFAULT, - esmp, sc->rxq_entries, rxq->buf_base_id, evq->common, - &rxq->common)) != 0) + esmp, sc->rxq_entries, rxq->buf_base_id, EFX_RXQ_FLAG_NONE, + evq->common, &rxq->common)) != 0) goto fail; SFXGE_EVQ_LOCK(evq); /* Enable the receive queue. */ efx_rx_qenable(rxq->common); rxq->init_state = SFXGE_RXQ_STARTED; rxq->flush_state = SFXGE_FLUSH_REQUIRED; /* Try to fill the queue from the pool. */ sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(sc->rxq_entries), B_FALSE); SFXGE_EVQ_UNLOCK(evq); return (0); fail: efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id, EFX_RXQ_NBUFS(sc->rxq_entries)); return (rc); } void sfxge_rx_stop(struct sfxge_softc *sc) { int index; efx_mac_filter_default_rxq_clear(sc->enp); /* Stop the receive queue(s) */ index = sc->rxq_count; while (--index >= 0) sfxge_rx_qstop(sc, index); sc->rx_prefix_size = 0; sc->rx_buffer_size = 0; efx_rx_fini(sc->enp); } int sfxge_rx_start(struct sfxge_softc *sc) { struct sfxge_intr *intr; const efx_nic_cfg_t *encp; size_t hdrlen, align, reserved; int index; int rc; intr = &sc->intr; /* Initialize the common code receive module. */ if ((rc = efx_rx_init(sc->enp)) != 0) return (rc); encp = efx_nic_cfg_get(sc->enp); sc->rx_buffer_size = EFX_MAC_PDU(sc->ifnet->if_mtu); /* Calculate the receive packet buffer size. */ sc->rx_prefix_size = encp->enc_rx_prefix_size; /* Ensure IP headers are 32bit aligned */ hdrlen = sc->rx_prefix_size + sizeof (struct ether_header); sc->rx_buffer_align = P2ROUNDUP(hdrlen, 4) - hdrlen; sc->rx_buffer_size += sc->rx_buffer_align; /* Align end of packet buffer for RX DMA end padding */ align = MAX(1, encp->enc_rx_buf_align_end); EFSYS_ASSERT(ISP2(align)); sc->rx_buffer_size = P2ROUNDUP(sc->rx_buffer_size, align); /* * Standard mbuf zones only guarantee pointer-size alignment; * we need extra space to align to the cache line */ reserved = sc->rx_buffer_size + CACHE_LINE_SIZE; /* Select zone for packet buffers */ if (reserved <= MCLBYTES) sc->rx_cluster_size = MCLBYTES; else if (reserved <= MJUMPAGESIZE) sc->rx_cluster_size = MJUMPAGESIZE; else if (reserved <= MJUM9BYTES) sc->rx_cluster_size = MJUM9BYTES; else sc->rx_cluster_size = MJUM16BYTES; /* * Set up the scale table. Enable all hash types and hash insertion. 
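 * Each rx_indir_table entry maps a hash-derived index to a receive
 * queue; without kernel RSS support this is a plain modulo spread
 * over rxq_count queues.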
*/ for (index = 0; index < nitems(sc->rx_indir_table); index++) #ifdef RSS sc->rx_indir_table[index] = rss_get_indirection_to_bucket(index) % sc->rxq_count; #else sc->rx_indir_table[index] = index % sc->rxq_count; #endif if ((rc = efx_rx_scale_tbl_set(sc->enp, EFX_RSS_CONTEXT_DEFAULT, sc->rx_indir_table, nitems(sc->rx_indir_table))) != 0) goto fail; (void)efx_rx_scale_mode_set(sc->enp, EFX_RSS_CONTEXT_DEFAULT, EFX_RX_HASHALG_TOEPLITZ, EFX_RX_HASH_IPV4 | EFX_RX_HASH_TCPIPV4 | EFX_RX_HASH_IPV6 | EFX_RX_HASH_TCPIPV6, B_TRUE); #ifdef RSS rss_getkey(toep_key); #endif if ((rc = efx_rx_scale_key_set(sc->enp, EFX_RSS_CONTEXT_DEFAULT, toep_key, sizeof(toep_key))) != 0) goto fail; /* Start the receive queue(s). */ for (index = 0; index < sc->rxq_count; index++) { if ((rc = sfxge_rx_qstart(sc, index)) != 0) goto fail2; } rc = efx_mac_filter_default_rxq_set(sc->enp, sc->rxq[0]->common, sc->intr.n_alloc > 1); if (rc != 0) goto fail3; return (0); fail3: fail2: while (--index >= 0) sfxge_rx_qstop(sc, index); fail: efx_rx_fini(sc->enp); return (rc); } #ifdef SFXGE_LRO static void sfxge_lro_init(struct sfxge_rxq *rxq) { struct sfxge_lro_state *st = &rxq->lro; unsigned i; st->conns_mask = lro_table_size - 1; KASSERT(!((st->conns_mask + 1) & st->conns_mask), ("lro_table_size must be a power of 2")); st->sc = rxq->sc; st->conns = malloc((st->conns_mask + 1) * sizeof(st->conns[0]), M_SFXGE, M_WAITOK); st->conns_n = malloc((st->conns_mask + 1) * sizeof(st->conns_n[0]), M_SFXGE, M_WAITOK); for (i = 0; i <= st->conns_mask; ++i) { TAILQ_INIT(&st->conns[i]); st->conns_n[i] = 0; } LIST_INIT(&st->active_conns); TAILQ_INIT(&st->free_conns); } static void sfxge_lro_fini(struct sfxge_rxq *rxq) { struct sfxge_lro_state *st = &rxq->lro; struct sfxge_lro_conn *c; unsigned i; /* Return cleanly if sfxge_lro_init() has not been called. */ if (st->conns == NULL) return; KASSERT(LIST_EMPTY(&st->active_conns), ("found active connections")); for (i = 0; i <= st->conns_mask; ++i) { while (!TAILQ_EMPTY(&st->conns[i])) { c = TAILQ_LAST(&st->conns[i], sfxge_lro_tailq); sfxge_lro_drop(rxq, c); } } while (!TAILQ_EMPTY(&st->free_conns)) { c = TAILQ_FIRST(&st->free_conns); TAILQ_REMOVE(&st->free_conns, c, link); KASSERT(!c->mbuf, ("found orphaned mbuf")); free(c, M_SFXGE); } free(st->conns_n, M_SFXGE); free(st->conns, M_SFXGE); st->conns = NULL; } #else static void sfxge_lro_init(struct sfxge_rxq *rxq) { } static void sfxge_lro_fini(struct sfxge_rxq *rxq) { } #endif /* SFXGE_LRO */ static void sfxge_rx_qfini(struct sfxge_softc *sc, unsigned int index) { struct sfxge_rxq *rxq; rxq = sc->rxq[index]; KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED, ("rxq->init_state != SFXGE_RXQ_INITIALIZED")); /* Free the context array and the flow table. */ free(rxq->queue, M_SFXGE); sfxge_lro_fini(rxq); /* Release DMA memory. */ sfxge_dma_free(&rxq->mem); sc->rxq[index] = NULL; free(rxq, M_SFXGE); } static int sfxge_rx_qinit(struct sfxge_softc *sc, unsigned int index) { struct sfxge_rxq *rxq; struct sfxge_evq *evq; efsys_mem_t *esmp; int rc; KASSERT(index < sc->rxq_count, ("index >= %d", sc->rxq_count)); rxq = malloc(sizeof(struct sfxge_rxq), M_SFXGE, M_ZERO | M_WAITOK); rxq->sc = sc; rxq->index = index; rxq->entries = sc->rxq_entries; rxq->ptr_mask = rxq->entries - 1; rxq->refill_threshold = RX_REFILL_THRESHOLD(rxq->entries); sc->rxq[index] = rxq; esmp = &rxq->mem; evq = sc->evq[index]; /* Allocate and zero DMA space. */ if ((rc = sfxge_dma_alloc(sc, EFX_RXQ_SIZE(sc->rxq_entries), esmp)) != 0) return (rc); /* Allocate buffer table entries. 
*/ sfxge_sram_buf_tbl_alloc(sc, EFX_RXQ_NBUFS(sc->rxq_entries), &rxq->buf_base_id); /* Allocate the context array and the flow table. */ rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * sc->rxq_entries, M_SFXGE, M_WAITOK | M_ZERO); sfxge_lro_init(rxq); callout_init(&rxq->refill_callout, 1); rxq->init_state = SFXGE_RXQ_INITIALIZED; return (0); } static const struct { const char *name; size_t offset; } sfxge_rx_stats[] = { #define SFXGE_RX_STAT(name, member) \ { #name, offsetof(struct sfxge_rxq, member) } #ifdef SFXGE_LRO SFXGE_RX_STAT(lro_merges, lro.n_merges), SFXGE_RX_STAT(lro_bursts, lro.n_bursts), SFXGE_RX_STAT(lro_slow_start, lro.n_slow_start), SFXGE_RX_STAT(lro_misorder, lro.n_misorder), SFXGE_RX_STAT(lro_too_many, lro.n_too_many), SFXGE_RX_STAT(lro_new_stream, lro.n_new_stream), SFXGE_RX_STAT(lro_drop_idle, lro.n_drop_idle), SFXGE_RX_STAT(lro_drop_closed, lro.n_drop_closed) #endif }; static int sfxge_rx_stat_handler(SYSCTL_HANDLER_ARGS) { struct sfxge_softc *sc = arg1; unsigned int id = arg2; unsigned int sum, index; /* Sum across all RX queues */ sum = 0; for (index = 0; index < sc->rxq_count; index++) sum += *(unsigned int *)((caddr_t)sc->rxq[index] + sfxge_rx_stats[id].offset); return (SYSCTL_OUT(req, &sum, sizeof(sum))); } static void sfxge_rx_stat_init(struct sfxge_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid_list *stat_list; unsigned int id; stat_list = SYSCTL_CHILDREN(sc->stats_node); for (id = 0; id < nitems(sfxge_rx_stats); id++) { SYSCTL_ADD_PROC( ctx, stat_list, OID_AUTO, sfxge_rx_stats[id].name, CTLTYPE_UINT|CTLFLAG_RD, sc, id, sfxge_rx_stat_handler, "IU", ""); } } void sfxge_rx_fini(struct sfxge_softc *sc) { int index; index = sc->rxq_count; while (--index >= 0) sfxge_rx_qfini(sc, index); sc->rxq_count = 0; } int sfxge_rx_init(struct sfxge_softc *sc) { struct sfxge_intr *intr; int index; int rc; #ifdef SFXGE_LRO if (!ISP2(lro_table_size)) { log(LOG_ERR, "%s=%u must be power of 2", SFXGE_LRO_PARAM(table_size), lro_table_size); rc = EINVAL; goto fail_lro_table_size; } if (lro_idle_ticks == 0) lro_idle_ticks = hz / 10 + 1; /* 100 ms */ #endif intr = &sc->intr; sc->rxq_count = intr->n_alloc; KASSERT(intr->state == SFXGE_INTR_INITIALIZED, ("intr->state != SFXGE_INTR_INITIALIZED")); /* Initialize the receive queue(s) - one per interrupt. */ for (index = 0; index < sc->rxq_count; index++) { if ((rc = sfxge_rx_qinit(sc, index)) != 0) goto fail; } sfxge_rx_stat_init(sc); return (0); fail: /* Tear down the receive queue(s). */ while (--index >= 0) sfxge_rx_qfini(sc, index); sc->rxq_count = 0; #ifdef SFXGE_LRO fail_lro_table_size: #endif return (rc); } Index: projects/clang700-import/sys/kern/kern_event.c =================================================================== --- projects/clang700-import/sys/kern/kern_event.c (revision 340917) +++ projects/clang700-import/sys/kern/kern_event.c (revision 340918) @@ -1,2732 +1,2740 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999,2000,2001 Jonathan Lemon * Copyright 2004 John-Mark Gurney * Copyright (c) 2009 Apple, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ktrace.h" #include "opt_kqueue.h" #ifdef COMPAT_FREEBSD11 #define _WANT_FREEBSD11_KEVENT #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #endif #include #include static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); /* * This lock is used if multiple kq locks are required. This possibly * should be made into a per proc lock. */ static struct mtx kq_global; MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF); #define KQ_GLOBAL_LOCK(lck, haslck) do { \ if (!haslck) \ mtx_lock(lck); \ haslck = 1; \ } while (0) #define KQ_GLOBAL_UNLOCK(lck, haslck) do { \ if (haslck) \ mtx_unlock(lck); \ haslck = 0; \ } while (0) TASKQUEUE_DEFINE_THREAD(kqueue_ctx); static int kevent_copyout(void *arg, struct kevent *kevp, int count); static int kevent_copyin(void *arg, struct kevent *kevp, int count); static int kqueue_register(struct kqueue *kq, struct kevent *kev, - struct thread *td, int waitok); + struct thread *td, int mflag); static int kqueue_acquire(struct file *fp, struct kqueue **kqp); static void kqueue_release(struct kqueue *kq, int locked); static void kqueue_destroy(struct kqueue *kq); static void kqueue_drain(struct kqueue *kq, struct thread *td); static int kqueue_expand(struct kqueue *kq, struct filterops *fops, - uintptr_t ident, int waitok); + uintptr_t ident, int mflag); static void kqueue_task(void *arg, int pending); static int kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, const struct timespec *timeout, struct kevent *keva, struct thread *td); static void kqueue_wakeup(struct kqueue *kq); static struct filterops *kqueue_fo_find(int filt); static void kqueue_fo_release(int filt); struct g_kevent_args; static int kern_kevent_generic(struct thread *td, struct g_kevent_args *uap, struct kevent_copyops *k_ops, const char *struct_name); static fo_ioctl_t kqueue_ioctl; static fo_poll_t kqueue_poll; static fo_kqfilter_t kqueue_kqfilter; static fo_stat_t kqueue_stat; static fo_close_t kqueue_close; static fo_fill_kinfo_t kqueue_fill_kinfo; static struct fileops kqueueops = { .fo_read = invfo_rdwr, .fo_write = invfo_rdwr, .fo_truncate = invfo_truncate, .fo_ioctl = kqueue_ioctl, .fo_poll = kqueue_poll, .fo_kqfilter = kqueue_kqfilter, .fo_stat = kqueue_stat, .fo_close = 
kqueue_close, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, .fo_fill_kinfo = kqueue_fill_kinfo, }; static int knote_attach(struct knote *kn, struct kqueue *kq); static void knote_drop(struct knote *kn, struct thread *td); static void knote_drop_detached(struct knote *kn, struct thread *td); static void knote_enqueue(struct knote *kn); static void knote_dequeue(struct knote *kn); static void knote_init(void); -static struct knote *knote_alloc(int waitok); +static struct knote *knote_alloc(int mflag); static void knote_free(struct knote *kn); static void filt_kqdetach(struct knote *kn); static int filt_kqueue(struct knote *kn, long hint); static int filt_procattach(struct knote *kn); static void filt_procdetach(struct knote *kn); static int filt_proc(struct knote *kn, long hint); static int filt_fileattach(struct knote *kn); static void filt_timerexpire(void *knx); static int filt_timerattach(struct knote *kn); static void filt_timerdetach(struct knote *kn); static void filt_timerstart(struct knote *kn, sbintime_t to); static void filt_timertouch(struct knote *kn, struct kevent *kev, u_long type); static int filt_timervalidate(struct knote *kn, sbintime_t *to); static int filt_timer(struct knote *kn, long hint); static int filt_userattach(struct knote *kn); static void filt_userdetach(struct knote *kn); static int filt_user(struct knote *kn, long hint); static void filt_usertouch(struct knote *kn, struct kevent *kev, u_long type); static struct filterops file_filtops = { .f_isfd = 1, .f_attach = filt_fileattach, }; static struct filterops kqread_filtops = { .f_isfd = 1, .f_detach = filt_kqdetach, .f_event = filt_kqueue, }; /* XXX - move to kern_proc.c? */ static struct filterops proc_filtops = { .f_isfd = 0, .f_attach = filt_procattach, .f_detach = filt_procdetach, .f_event = filt_proc, }; static struct filterops timer_filtops = { .f_isfd = 0, .f_attach = filt_timerattach, .f_detach = filt_timerdetach, .f_event = filt_timer, .f_touch = filt_timertouch, }; static struct filterops user_filtops = { .f_attach = filt_userattach, .f_detach = filt_userdetach, .f_event = filt_user, .f_touch = filt_usertouch, }; static uma_zone_t knote_zone; static unsigned int kq_ncallouts = 0; static unsigned int kq_calloutmax = 4 * 1024; SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW, &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue"); /* XXX - ensure not influx ? 
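 * ("In flux" means kn_influx > 0: the knote is being modified outside
 * the kq lock; see kn_enter_flux() and kn_leave_flux() below.)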
*/ #define KNOTE_ACTIVATE(kn, islock) do { \ if ((islock)) \ mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED); \ else \ KQ_LOCK((kn)->kn_kq); \ (kn)->kn_status |= KN_ACTIVE; \ if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \ knote_enqueue((kn)); \ if (!(islock)) \ KQ_UNLOCK((kn)->kn_kq); \ } while(0) #define KQ_LOCK(kq) do { \ mtx_lock(&(kq)->kq_lock); \ } while (0) #define KQ_FLUX_WAKEUP(kq) do { \ if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \ (kq)->kq_state &= ~KQ_FLUXWAIT; \ wakeup((kq)); \ } \ } while (0) #define KQ_UNLOCK_FLUX(kq) do { \ KQ_FLUX_WAKEUP(kq); \ mtx_unlock(&(kq)->kq_lock); \ } while (0) #define KQ_UNLOCK(kq) do { \ mtx_unlock(&(kq)->kq_lock); \ } while (0) #define KQ_OWNED(kq) do { \ mtx_assert(&(kq)->kq_lock, MA_OWNED); \ } while (0) #define KQ_NOTOWNED(kq) do { \ mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \ } while (0) static struct knlist * kn_list_lock(struct knote *kn) { struct knlist *knl; knl = kn->kn_knlist; if (knl != NULL) knl->kl_lock(knl->kl_lockarg); return (knl); } static void kn_list_unlock(struct knlist *knl) { bool do_free; if (knl == NULL) return; do_free = knl->kl_autodestroy && knlist_empty(knl); knl->kl_unlock(knl->kl_lockarg); if (do_free) { knlist_destroy(knl); free(knl, M_KQUEUE); } } static bool kn_in_flux(struct knote *kn) { return (kn->kn_influx > 0); } static void kn_enter_flux(struct knote *kn) { KQ_OWNED(kn->kn_kq); MPASS(kn->kn_influx < INT_MAX); kn->kn_influx++; } static bool kn_leave_flux(struct knote *kn) { KQ_OWNED(kn->kn_kq); MPASS(kn->kn_influx > 0); kn->kn_influx--; return (kn->kn_influx == 0); } #define KNL_ASSERT_LOCK(knl, islocked) do { \ if (islocked) \ KNL_ASSERT_LOCKED(knl); \ else \ KNL_ASSERT_UNLOCKED(knl); \ } while (0) #ifdef INVARIANTS #define KNL_ASSERT_LOCKED(knl) do { \ knl->kl_assert_locked((knl)->kl_lockarg); \ } while (0) #define KNL_ASSERT_UNLOCKED(knl) do { \ knl->kl_assert_unlocked((knl)->kl_lockarg); \ } while (0) #else /* !INVARIANTS */ #define KNL_ASSERT_LOCKED(knl) do {} while(0) #define KNL_ASSERT_UNLOCKED(knl) do {} while (0) #endif /* INVARIANTS */ #ifndef KN_HASHSIZE #define KN_HASHSIZE 64 /* XXX should be tunable */ #endif #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) static int filt_nullattach(struct knote *kn) { return (ENXIO); }; struct filterops null_filtops = { .f_isfd = 0, .f_attach = filt_nullattach, }; /* XXX - make SYSINIT to add these, and move into respective modules. */ extern struct filterops sig_filtops; extern struct filterops fs_filtops; /* * Table for all system-defined filters. */ static struct mtx filterops_lock; MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", MTX_DEF); static struct { struct filterops *for_fop; int for_nolock; int for_refcnt; } sysfilt_ops[EVFILT_SYSCOUNT] = { { &file_filtops, 1 }, /* EVFILT_READ */ { &file_filtops, 1 }, /* EVFILT_WRITE */ { &null_filtops }, /* EVFILT_AIO */ { &file_filtops, 1 }, /* EVFILT_VNODE */ { &proc_filtops, 1 }, /* EVFILT_PROC */ { &sig_filtops, 1 }, /* EVFILT_SIGNAL */ { &timer_filtops, 1 }, /* EVFILT_TIMER */ { &file_filtops, 1 }, /* EVFILT_PROCDESC */ { &fs_filtops, 1 }, /* EVFILT_FS */ { &null_filtops }, /* EVFILT_LIO */ { &user_filtops, 1 }, /* EVFILT_USER */ { &null_filtops }, /* EVFILT_SENDFILE */ { &file_filtops, 1 }, /* EVFILT_EMPTY */ }; /* * Simple redirection for all cdevsw style objects to call their fo_kqfilter * method.
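 * A driver-side fo_kqfilter conventionally validates the filter type,
 * points kn_fop at the driver's filterops and links the knote onto a
 * knlist.  The guarded sketch below illustrates that convention; the
 * xx_softc, xx_rfiltops and xx_rsel names are hypothetical.
 */
#if 0	/* illustrative sketch only, not part of the build */
static int
xx_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct xx_softc *sc = dev->si_drv1;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);
	kn->kn_fop = &xx_rfiltops;	/* supplies f_detach and f_event */
	kn->kn_hook = sc;
	knlist_add(&sc->xx_rsel.si_note, kn, 0);
	return (0);
}
#endif
/*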
*/ static int filt_fileattach(struct knote *kn) { return (fo_kqfilter(kn->kn_fp, kn)); } /*ARGSUSED*/ static int kqueue_kqfilter(struct file *fp, struct knote *kn) { struct kqueue *kq = kn->kn_fp->f_data; if (kn->kn_filter != EVFILT_READ) return (EINVAL); kn->kn_status |= KN_KQUEUE; kn->kn_fop = &kqread_filtops; knlist_add(&kq->kq_sel.si_note, kn, 0); return (0); } static void filt_kqdetach(struct knote *kn) { struct kqueue *kq = kn->kn_fp->f_data; knlist_remove(&kq->kq_sel.si_note, kn, 0); } /*ARGSUSED*/ static int filt_kqueue(struct knote *kn, long hint) { struct kqueue *kq = kn->kn_fp->f_data; kn->kn_data = kq->kq_count; return (kn->kn_data > 0); } /* XXX - move to kern_proc.c? */ static int filt_procattach(struct knote *kn) { struct proc *p; int error; bool exiting, immediate; exiting = immediate = false; if (kn->kn_sfflags & NOTE_EXIT) p = pfind_any(kn->kn_id); else p = pfind(kn->kn_id); if (p == NULL) return (ESRCH); if (p->p_flag & P_WEXIT) exiting = true; if ((error = p_cansee(curthread, p))) { PROC_UNLOCK(p); return (error); } kn->kn_ptr.p_proc = p; kn->kn_flags |= EV_CLEAR; /* automatically set */ /* * Internal flag indicating registration done by kernel for the * purposes of getting a NOTE_CHILD notification. */ if (kn->kn_flags & EV_FLAG2) { kn->kn_flags &= ~EV_FLAG2; kn->kn_data = kn->kn_sdata; /* ppid */ kn->kn_fflags = NOTE_CHILD; kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK); immediate = true; /* Force immediate activation of child note. */ } /* * Internal flag indicating registration done by kernel (for other than * NOTE_CHILD). */ if (kn->kn_flags & EV_FLAG1) { kn->kn_flags &= ~EV_FLAG1; } knlist_add(p->p_klist, kn, 1); /* * Immediately activate any child notes or, in the case of a zombie * target process, exit notes. The latter is necessary to handle the * case where the target process, e.g. a child, dies before the kevent * is registered. */ if (immediate || (exiting && filt_proc(kn, NOTE_EXIT))) KNOTE_ACTIVATE(kn, 0); PROC_UNLOCK(p); return (0); } /* * The knote may be attached to a different process, which may exit, * leaving nothing for the knote to be attached to. So when the process * exits, the knote is marked as DETACHED and also flagged as ONESHOT so * it will be deleted when read out. However, as part of the knote deletion, * this routine is called, so a check is needed to avoid actually performing * a detach, because the original process does not exist any more. */ /* XXX - move to kern_proc.c? */ static void filt_procdetach(struct knote *kn) { knlist_remove(kn->kn_knlist, kn, 0); kn->kn_ptr.p_proc = NULL; } /* XXX - move to kern_proc.c? */ static int filt_proc(struct knote *kn, long hint) { struct proc *p; u_int event; p = kn->kn_ptr.p_proc; if (p == NULL) /* already activated, from attach filter */ return (0); /* Mask off extra data. */ event = (u_int)hint & NOTE_PCTRLMASK; /* If the user is interested in this event, record it. */ if (kn->kn_sfflags & event) kn->kn_fflags |= event; /* Process is gone, so flag the event as finished. */ if (event == NOTE_EXIT) { kn->kn_flags |= EV_EOF | EV_ONESHOT; kn->kn_ptr.p_proc = NULL; if (kn->kn_fflags & NOTE_EXIT) kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig); if (kn->kn_fflags == 0) kn->kn_flags |= EV_DROP; return (1); } return (kn->kn_fflags != 0); } /* * Called when the process forked. It mostly does the same as the * knote(), activating all knotes registered to be activated when the * process forked. Additionally, for each knote attached to the * parent, check whether user wants to track the new process. 
If so * attach a new knote to it, and immediately report an event with the * child's pid. */ void knote_fork(struct knlist *list, int pid) { struct kqueue *kq; struct knote *kn; struct kevent kev; int error; if (list == NULL) return; - list->kl_lock(list->kl_lockarg); + memset(&kev, 0, sizeof(kev)); + list->kl_lock(list->kl_lockarg); SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { kq = kn->kn_kq; KQ_LOCK(kq); if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { KQ_UNLOCK(kq); continue; } /* * The same as knote(), activate the event. */ if ((kn->kn_sfflags & NOTE_TRACK) == 0) { if (kn->kn_fop->f_event(kn, NOTE_FORK)) KNOTE_ACTIVATE(kn, 1); KQ_UNLOCK(kq); continue; } /* * The NOTE_TRACK case. In addition to the activation * of the event, we need to register new events to * track the child. Drop the locks in preparation for * the call to kqueue_register(). */ kn_enter_flux(kn); KQ_UNLOCK(kq); list->kl_unlock(list->kl_lockarg); /* * Activate existing knote and register tracking knotes with * new process. * * First register a knote to get just the child notice. This * must be a separate note from a potential NOTE_EXIT * notification since both NOTE_CHILD and NOTE_EXIT are defined * to use the data field (in conflicting ways). */ kev.ident = pid; kev.filter = kn->kn_filter; kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT | EV_FLAG2; kev.fflags = kn->kn_sfflags; kev.data = kn->kn_id; /* parent */ kev.udata = kn->kn_kevent.udata;/* preserve udata */ - error = kqueue_register(kq, &kev, NULL, 0); + error = kqueue_register(kq, &kev, NULL, M_NOWAIT); if (error) kn->kn_fflags |= NOTE_TRACKERR; /* * Then register another knote to track other potential events * from the new process. */ kev.ident = pid; kev.filter = kn->kn_filter; kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; kev.fflags = kn->kn_sfflags; kev.data = kn->kn_id; /* parent */ kev.udata = kn->kn_kevent.udata;/* preserve udata */ - error = kqueue_register(kq, &kev, NULL, 0); + error = kqueue_register(kq, &kev, NULL, M_NOWAIT); if (error) kn->kn_fflags |= NOTE_TRACKERR; if (kn->kn_fop->f_event(kn, NOTE_FORK)) KNOTE_ACTIVATE(kn, 0); + list->kl_lock(list->kl_lockarg); KQ_LOCK(kq); kn_leave_flux(kn); KQ_UNLOCK_FLUX(kq); - list->kl_lock(list->kl_lockarg); } list->kl_unlock(list->kl_lockarg); } /* * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the * interval timer support code. */ #define NOTE_TIMER_PRECMASK \ (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS) static sbintime_t timer2sbintime(intptr_t data, int flags) { int64_t secs; /* * Macros for converting to the fractional second portion of an * sbintime_t using 64bit multiplication to improve precision. 
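 * An sbintime_t is 32.32 fixed point, so the fractional part of a
 * nanosecond value is ns * 2^32 / 10^9.  NS_TO_SBT() computes this as
 * (ns * (2^63 / (5 * 10^8))) >> 32, which is algebraically identical
 * but keeps the precomputed constant as large as possible to minimise
 * rounding error.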
*/ #define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32) #define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32) #define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32) switch (flags & NOTE_TIMER_PRECMASK) { case NOTE_SECONDS: #ifdef __LP64__ if (data > (SBT_MAX / SBT_1S)) return (SBT_MAX); #endif return ((sbintime_t)data << 32); case NOTE_MSECONDS: /* FALLTHROUGH */ case 0: if (data >= 1000) { secs = data / 1000; #ifdef __LP64__ if (secs > (SBT_MAX / SBT_1S)) return (SBT_MAX); #endif return (secs << 32 | MS_TO_SBT(data % 1000)); } return (MS_TO_SBT(data)); case NOTE_USECONDS: if (data >= 1000000) { secs = data / 1000000; #ifdef __LP64__ if (secs > (SBT_MAX / SBT_1S)) return (SBT_MAX); #endif return (secs << 32 | US_TO_SBT(data % 1000000)); } return (US_TO_SBT(data)); case NOTE_NSECONDS: if (data >= 1000000000) { secs = data / 1000000000; #ifdef __LP64__ if (secs > (SBT_MAX / SBT_1S)) return (SBT_MAX); #endif return (secs << 32 | US_TO_SBT(data % 1000000000)); } return (NS_TO_SBT(data)); default: break; } return (-1); } struct kq_timer_cb_data { struct callout c; sbintime_t next; /* next timer event fires at */ sbintime_t to; /* precalculated timer period, 0 for abs */ }; static void filt_timerexpire(void *knx) { struct knote *kn; struct kq_timer_cb_data *kc; kn = knx; kn->kn_data++; KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */ if ((kn->kn_flags & EV_ONESHOT) != 0) return; kc = kn->kn_ptr.p_v; if (kc->to == 0) return; kc->next += kc->to; callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE); } /* * data contains amount of time to sleep */ static int filt_timervalidate(struct knote *kn, sbintime_t *to) { struct bintime bt; sbintime_t sbt; if (kn->kn_sdata < 0) return (EINVAL); if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0) kn->kn_sdata = 1; /* * The only fflags values supported are the timer unit * (precision) and the absolute time indicator. 
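 * A NOTE_ABSTIME deadline is supplied as a wall-clock time, so below
 * it is rebased onto the sbinuptime() timebase by subtracting the
 * boot time.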
*/ if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0) return (EINVAL); *to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags); if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { getboottimebin(&bt); sbt = bttosbt(bt); *to -= sbt; } if (*to < 0) return (EINVAL); return (0); } static int filt_timerattach(struct knote *kn) { struct kq_timer_cb_data *kc; sbintime_t to; unsigned int ncallouts; int error; error = filt_timervalidate(kn, &to); if (error != 0) return (error); do { ncallouts = kq_ncallouts; if (ncallouts >= kq_calloutmax) return (ENOMEM); } while (!atomic_cmpset_int(&kq_ncallouts, ncallouts, ncallouts + 1)); if ((kn->kn_sfflags & NOTE_ABSTIME) == 0) kn->kn_flags |= EV_CLEAR; /* automatically set */ kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */ kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK); callout_init(&kc->c, 1); filt_timerstart(kn, to); return (0); } static void filt_timerstart(struct knote *kn, sbintime_t to) { struct kq_timer_cb_data *kc; kc = kn->kn_ptr.p_v; if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) { kc->next = to; kc->to = 0; } else { kc->next = to + sbinuptime(); kc->to = to; } callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE); } static void filt_timerdetach(struct knote *kn) { struct kq_timer_cb_data *kc; unsigned int old __unused; kc = kn->kn_ptr.p_v; callout_drain(&kc->c); free(kc, M_KQUEUE); old = atomic_fetchadd_int(&kq_ncallouts, -1); KASSERT(old > 0, ("Number of callouts cannot become negative")); kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */ } static void filt_timertouch(struct knote *kn, struct kevent *kev, u_long type) { struct kq_timer_cb_data *kc; struct kqueue *kq; sbintime_t to; int error; switch (type) { case EVENT_REGISTER: /* Handle re-added timers that update data/fflags */ if (kev->flags & EV_ADD) { kc = kn->kn_ptr.p_v; /* Drain any existing callout. */ callout_drain(&kc->c); /* Throw away any existing undelivered record * of the timer expiration. This is done under * the presumption that if a process is * re-adding this timer with new parameters, * it is no longer interested in what may have * happened under the old parameters. If it is * interested, it can wait for the expiration, * delete the old timer definition, and then * add the new one. * * This has to be done while the kq is locked: * - if enqueued, dequeue * - make it no longer active * - clear the count of expiration events */ kq = kn->kn_kq; KQ_LOCK(kq); if (kn->kn_status & KN_QUEUED) knote_dequeue(kn); kn->kn_status &= ~KN_ACTIVE; kn->kn_data = 0; KQ_UNLOCK(kq); /* Reschedule timer based on new data/fflags */ kn->kn_sfflags = kev->fflags; kn->kn_sdata = kev->data; error = filt_timervalidate(kn, &to); if (error != 0) { kn->kn_flags |= EV_ERROR; kn->kn_data = error; } else filt_timerstart(kn, to); } break; case EVENT_PROCESS: *kev = kn->kn_kevent; if (kn->kn_flags & EV_CLEAR) { kn->kn_data = 0; kn->kn_fflags = 0; } break; default: panic("filt_timertouch() - invalid type (%ld)", type); break; } } static int filt_timer(struct knote *kn, long hint) { return (kn->kn_data != 0); } static int filt_userattach(struct knote *kn) { /* * EVFILT_USER knotes are not attached to anything in the kernel. */ kn->kn_hook = NULL; if (kn->kn_fflags & NOTE_TRIGGER) kn->kn_hookid = 1; else kn->kn_hookid = 0; return (0); } static void filt_userdetach(__unused struct knote *kn) { /* * EVFILT_USER knotes are not attached to anything in the kernel. 
*/ } static int filt_user(struct knote *kn, __unused long hint) { return (kn->kn_hookid); } static void filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) { u_int ffctrl; switch (type) { case EVENT_REGISTER: if (kev->fflags & NOTE_TRIGGER) kn->kn_hookid = 1; ffctrl = kev->fflags & NOTE_FFCTRLMASK; kev->fflags &= NOTE_FFLAGSMASK; switch (ffctrl) { case NOTE_FFNOP: break; case NOTE_FFAND: kn->kn_sfflags &= kev->fflags; break; case NOTE_FFOR: kn->kn_sfflags |= kev->fflags; break; case NOTE_FFCOPY: kn->kn_sfflags = kev->fflags; break; default: /* XXX Return error? */ break; } kn->kn_sdata = kev->data; if (kev->flags & EV_CLEAR) { kn->kn_hookid = 0; kn->kn_data = 0; kn->kn_fflags = 0; } break; case EVENT_PROCESS: *kev = kn->kn_kevent; kev->fflags = kn->kn_sfflags; kev->data = kn->kn_sdata; if (kn->kn_flags & EV_CLEAR) { kn->kn_hookid = 0; kn->kn_data = 0; kn->kn_fflags = 0; } break; default: panic("filt_usertouch() - invalid type (%ld)", type); break; } } int sys_kqueue(struct thread *td, struct kqueue_args *uap) { return (kern_kqueue(td, 0, NULL)); } static void kqueue_init(struct kqueue *kq) { mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK); TAILQ_INIT(&kq->kq_head); knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock); TASK_INIT(&kq->kq_task, 0, kqueue_task, kq); } int kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps) { struct filedesc *fdp; struct kqueue *kq; struct file *fp; struct ucred *cred; int fd, error; fdp = td->td_proc->p_fd; cred = td->td_ucred; if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES))) return (ENOMEM); error = falloc_caps(td, &fp, &fd, flags, fcaps); if (error != 0) { chgkqcnt(cred->cr_ruidinfo, -1, 0); return (error); } /* An extra reference on `fp' has been held for us by falloc(). */ kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO); kqueue_init(kq); kq->kq_fdp = fdp; kq->kq_cred = crhold(cred); FILEDESC_XLOCK(fdp); TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); FILEDESC_XUNLOCK(fdp); finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops); fdrop(fp, td); td->td_retval[0] = fd; return (0); } struct g_kevent_args { int fd; void *changelist; int nchanges; void *eventlist; int nevents; const struct timespec *timeout; }; int sys_kevent(struct thread *td, struct kevent_args *uap) { struct kevent_copyops k_ops = { .arg = uap, .k_copyout = kevent_copyout, .k_copyin = kevent_copyin, .kevent_size = sizeof(struct kevent), }; struct g_kevent_args gk_args = { .fd = uap->fd, .changelist = uap->changelist, .nchanges = uap->nchanges, .eventlist = uap->eventlist, .nevents = uap->nevents, .timeout = uap->timeout, }; return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent")); } static int kern_kevent_generic(struct thread *td, struct g_kevent_args *uap, struct kevent_copyops *k_ops, const char *struct_name) { struct timespec ts, *tsp; #ifdef KTRACE struct kevent *eventlist = uap->eventlist; #endif int error; if (uap->timeout != NULL) { error = copyin(uap->timeout, &ts, sizeof(ts)); if (error) return (error); tsp = &ts; } else tsp = NULL; #ifdef KTRACE if (KTRPOINT(td, KTR_STRUCT_ARRAY)) ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist, uap->nchanges, k_ops->kevent_size); #endif error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, k_ops, tsp); #ifdef KTRACE if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY)) ktrstructarray(struct_name, UIO_USERSPACE, eventlist, td->td_retval[0], k_ops->kevent_size); #endif return (error); } /* * Copy 'count' items into the destination list pointed to by uap->eventlist. 
*/ static int kevent_copyout(void *arg, struct kevent *kevp, int count) { struct kevent_args *uap; int error; KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); uap = (struct kevent_args *)arg; error = copyout(kevp, uap->eventlist, count * sizeof *kevp); if (error == 0) uap->eventlist += count; return (error); } /* * Copy 'count' items from the list pointed to by uap->changelist. */ static int kevent_copyin(void *arg, struct kevent *kevp, int count) { struct kevent_args *uap; int error; KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); uap = (struct kevent_args *)arg; error = copyin(uap->changelist, kevp, count * sizeof *kevp); if (error == 0) uap->changelist += count; return (error); } #ifdef COMPAT_FREEBSD11 static int kevent11_copyout(void *arg, struct kevent *kevp, int count) { struct freebsd11_kevent_args *uap; struct kevent_freebsd11 kev11; int error, i; KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); uap = (struct freebsd11_kevent_args *)arg; for (i = 0; i < count; i++) { kev11.ident = kevp->ident; kev11.filter = kevp->filter; kev11.flags = kevp->flags; kev11.fflags = kevp->fflags; kev11.data = kevp->data; kev11.udata = kevp->udata; error = copyout(&kev11, uap->eventlist, sizeof(kev11)); if (error != 0) break; uap->eventlist++; kevp++; } return (error); } /* * Copy 'count' items from the list pointed to by uap->changelist. */ static int kevent11_copyin(void *arg, struct kevent *kevp, int count) { struct freebsd11_kevent_args *uap; struct kevent_freebsd11 kev11; int error, i; KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); uap = (struct freebsd11_kevent_args *)arg; for (i = 0; i < count; i++) { error = copyin(uap->changelist, &kev11, sizeof(kev11)); if (error != 0) break; kevp->ident = kev11.ident; kevp->filter = kev11.filter; kevp->flags = kev11.flags; kevp->fflags = kev11.fflags; kevp->data = (uintptr_t)kev11.data; kevp->udata = kev11.udata; bzero(&kevp->ext, sizeof(kevp->ext)); uap->changelist++; kevp++; } return (error); } int freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap) { struct kevent_copyops k_ops = { .arg = uap, .k_copyout = kevent11_copyout, .k_copyin = kevent11_copyin, .kevent_size = sizeof(struct kevent_freebsd11), }; struct g_kevent_args gk_args = { .fd = uap->fd, .changelist = uap->changelist, .nchanges = uap->nchanges, .eventlist = uap->eventlist, .nevents = uap->nevents, .timeout = uap->timeout, }; return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent_freebsd11")); } #endif int kern_kevent(struct thread *td, int fd, int nchanges, int nevents, struct kevent_copyops *k_ops, const struct timespec *timeout) { cap_rights_t rights; struct file *fp; int error; cap_rights_init(&rights); if (nchanges > 0) cap_rights_set(&rights, CAP_KQUEUE_CHANGE); if (nevents > 0) cap_rights_set(&rights, CAP_KQUEUE_EVENT); error = fget(td, fd, &rights, &fp); if (error != 0) return (error); error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout); fdrop(fp, td); return (error); } static int kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents, struct kevent_copyops *k_ops, const struct timespec *timeout) { struct kevent keva[KQ_NEVENTS]; struct kevent *kevp, *changes; int i, n, nerrors, error; nerrors = 0; while (nchanges > 0) { n = nchanges > KQ_NEVENTS ? 
KQ_NEVENTS : nchanges; error = k_ops->k_copyin(k_ops->arg, keva, n); if (error) return (error); changes = keva; for (i = 0; i < n; i++) { kevp = &changes[i]; if (!kevp->filter) continue; kevp->flags &= ~EV_SYSFLAGS; - error = kqueue_register(kq, kevp, td, 1); + error = kqueue_register(kq, kevp, td, M_WAITOK); if (error || (kevp->flags & EV_RECEIPT)) { if (nevents == 0) return (error); kevp->flags = EV_ERROR; kevp->data = error; (void)k_ops->k_copyout(k_ops->arg, kevp, 1); nevents--; nerrors++; } } nchanges -= n; } if (nerrors) { td->td_retval[0] = nerrors; return (0); } return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td)); } int kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents, struct kevent_copyops *k_ops, const struct timespec *timeout) { struct kqueue *kq; int error; error = kqueue_acquire(fp, &kq); if (error != 0) return (error); error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout); kqueue_release(kq, 0); return (error); } /* * Performs a kevent() call on a temporarily created kqueue. This can be * used to perform one-shot polling, similar to poll() and select(). */ int kern_kevent_anonymous(struct thread *td, int nevents, struct kevent_copyops *k_ops) { struct kqueue kq = {}; int error; kqueue_init(&kq); kq.kq_refcnt = 1; error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL); kqueue_drain(&kq, td); kqueue_destroy(&kq); return (error); } int kqueue_add_filteropts(int filt, struct filterops *filtops) { int error; error = 0; if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) { printf( "trying to add a filterop that is out of range: %d is beyond %d\n", ~filt, EVFILT_SYSCOUNT); return EINVAL; } mtx_lock(&filterops_lock); if (sysfilt_ops[~filt].for_fop != &null_filtops && sysfilt_ops[~filt].for_fop != NULL) error = EEXIST; else { sysfilt_ops[~filt].for_fop = filtops; sysfilt_ops[~filt].for_refcnt = 0; } mtx_unlock(&filterops_lock); return (error); } int kqueue_del_filteropts(int filt) { int error; error = 0; if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) return EINVAL; mtx_lock(&filterops_lock); if (sysfilt_ops[~filt].for_fop == &null_filtops || sysfilt_ops[~filt].for_fop == NULL) error = EINVAL; else if (sysfilt_ops[~filt].for_refcnt != 0) error = EBUSY; else { sysfilt_ops[~filt].for_fop = &null_filtops; sysfilt_ops[~filt].for_refcnt = 0; } mtx_unlock(&filterops_lock); return error; } static struct filterops * kqueue_fo_find(int filt) { if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) return NULL; if (sysfilt_ops[~filt].for_nolock) return sysfilt_ops[~filt].for_fop; mtx_lock(&filterops_lock); sysfilt_ops[~filt].for_refcnt++; if (sysfilt_ops[~filt].for_fop == NULL) sysfilt_ops[~filt].for_fop = &null_filtops; mtx_unlock(&filterops_lock); return sysfilt_ops[~filt].for_fop; } static void kqueue_fo_release(int filt) { if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) return; if (sysfilt_ops[~filt].for_nolock) return; mtx_lock(&filterops_lock); KASSERT(sysfilt_ops[~filt].for_refcnt > 0, ("filter object refcount not valid on release")); sysfilt_ops[~filt].for_refcnt--; mtx_unlock(&filterops_lock); } /* - * A ref to kq (obtained via kqueue_acquire) must be held. waitok will - * influence if memory allocation should wait. Make sure it is 0 if you - * hold any mutexes. + * A ref to kq (obtained via kqueue_acquire) must be held. 
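+ * The mflag argument takes malloc(9) flags: pass M_WAITOK only from a
+ * sleepable context, and M_NOWAIT whenever a mutex is held, as the two
+ * kinds of callers in this file do:
+ *
+ *	kqueue_register(kq, kevp, td, M_WAITOK);	(syscall path)
+ *	kqueue_register(kq, &kev, NULL, M_NOWAIT);	(knote_fork())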
*/ static int -kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok) +kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, + int mflag) { struct filterops *fops; struct file *fp; struct knote *kn, *tkn; struct knlist *knl; int error, filt, event; int haskqglobal, filedesc_unlock; if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE)) return (EINVAL); fp = NULL; kn = NULL; knl = NULL; error = 0; haskqglobal = 0; filedesc_unlock = 0; filt = kev->filter; fops = kqueue_fo_find(filt); if (fops == NULL) return EINVAL; if (kev->flags & EV_ADD) { /* * Prevent waiting with locks. Non-sleepable * allocation failures are handled in the loop, only * if the spare knote appears to be actually required. */ - tkn = knote_alloc(waitok); + tkn = knote_alloc(mflag); } else { tkn = NULL; } findkn: if (fops->f_isfd) { KASSERT(td != NULL, ("td is NULL")); if (kev->ident > INT_MAX) error = EBADF; else error = fget(td, kev->ident, &cap_event_rights, &fp); if (error) goto done; if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops, - kev->ident, 0) != 0) { + kev->ident, M_NOWAIT) != 0) { /* try again */ fdrop(fp, td); fp = NULL; - error = kqueue_expand(kq, fops, kev->ident, waitok); + error = kqueue_expand(kq, fops, kev->ident, mflag); if (error) goto done; goto findkn; } if (fp->f_type == DTYPE_KQUEUE) { /* * If we add some intelligence about what we are doing, * we should be able to support events on ourselves. * We need to know when we are doing this to prevent * getting both the knlist lock and the kq lock since * they are the same thing. */ if (fp->f_data == kq) { error = EINVAL; goto done; } /* * Pre-lock the filedesc before the global * lock mutex, see the comment in * kqueue_close(). */ FILEDESC_XLOCK(td->td_proc->p_fd); filedesc_unlock = 1; KQ_GLOBAL_LOCK(&kq_global, haskqglobal); } KQ_LOCK(kq); if (kev->ident < kq->kq_knlistsize) { SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link) if (kev->filter == kn->kn_filter) break; } } else { - if ((kev->flags & EV_ADD) == EV_ADD) - kqueue_expand(kq, fops, kev->ident, waitok); + if ((kev->flags & EV_ADD) == EV_ADD) { + error = kqueue_expand(kq, fops, kev->ident, mflag); + if (error != 0) + goto done; + } KQ_LOCK(kq); /* * If possible, find an existing knote to use for this kevent. */ if (kev->filter == EVFILT_PROC && (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) { /* This is an internal creation of a process tracking * note. Don't attempt to coalesce this with an * existing note. */ ; } else if (kq->kq_knhashmask != 0) { struct klist *list; list = &kq->kq_knhash[ KN_HASH((u_long)kev->ident, kq->kq_knhashmask)]; SLIST_FOREACH(kn, list, kn_link) if (kev->ident == kn->kn_id && kev->filter == kn->kn_filter) break; } } /* knote is in the process of changing, wait for it to stabilize. */ if (kn != NULL && kn_in_flux(kn)) { KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); if (filedesc_unlock) { FILEDESC_XUNLOCK(td->td_proc->p_fd); filedesc_unlock = 0; } kq->kq_state |= KQ_FLUXWAIT; msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0); if (fp != NULL) { fdrop(fp, td); fp = NULL; } goto findkn; } /* * kn now contains the matching knote, or NULL if no match */ if (kn == NULL) { if (kev->flags & EV_ADD) { kn = tkn; tkn = NULL; if (kn == NULL) { KQ_UNLOCK(kq); error = ENOMEM; goto done; } kn->kn_fp = fp; kn->kn_kq = kq; kn->kn_fop = fops; /* * apply reference counts to knote structure, and * do not release it at the end of this routine. 
*/ fops = NULL; fp = NULL; kn->kn_sfflags = kev->fflags; kn->kn_sdata = kev->data; kev->fflags = 0; kev->data = 0; kn->kn_kevent = *kev; kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT); kn->kn_status = KN_DETACHED; if ((kev->flags & EV_DISABLE) != 0) kn->kn_status |= KN_DISABLED; kn_enter_flux(kn); error = knote_attach(kn, kq); KQ_UNLOCK(kq); if (error != 0) { tkn = kn; goto done; } if ((error = kn->kn_fop->f_attach(kn)) != 0) { knote_drop_detached(kn, td); goto done; } knl = kn_list_lock(kn); goto done_ev_add; } else { /* No matching knote and the EV_ADD flag is not set. */ KQ_UNLOCK(kq); error = ENOENT; goto done; } } if (kev->flags & EV_DELETE) { kn_enter_flux(kn); KQ_UNLOCK(kq); knote_drop(kn, td); goto done; } if (kev->flags & EV_FORCEONESHOT) { kn->kn_flags |= EV_ONESHOT; KNOTE_ACTIVATE(kn, 1); } if ((kev->flags & EV_ENABLE) != 0) kn->kn_status &= ~KN_DISABLED; else if ((kev->flags & EV_DISABLE) != 0) kn->kn_status |= KN_DISABLED; /* * The user may change some filter values after the initial EV_ADD, * but doing so will not reset any filter which has already been * triggered. */ kn->kn_status |= KN_SCAN; kn_enter_flux(kn); KQ_UNLOCK(kq); knl = kn_list_lock(kn); kn->kn_kevent.udata = kev->udata; if (!fops->f_isfd && fops->f_touch != NULL) { fops->f_touch(kn, kev, EVENT_REGISTER); } else { kn->kn_sfflags = kev->fflags; kn->kn_sdata = kev->data; } done_ev_add: /* * We can get here with kn->kn_knlist == NULL. This can happen when * the initial attach event decides that the event is "completed" * already, e.g., filt_procattach() is called on a zombie process. It * will call filt_proc() which will remove it from the list, and NULL * kn_knlist. * * KN_DISABLED will be stable while the knote is in flux, so the * unlocked read will not race with an update. */ if ((kn->kn_status & KN_DISABLED) == 0) event = kn->kn_fop->f_event(kn, 0); else event = 0; KQ_LOCK(kq); if (event) kn->kn_status |= KN_ACTIVE; if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) == KN_ACTIVE) knote_enqueue(kn); kn->kn_status &= ~KN_SCAN; kn_leave_flux(kn); kn_list_unlock(knl); KQ_UNLOCK_FLUX(kq); done: KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); if (filedesc_unlock) FILEDESC_XUNLOCK(td->td_proc->p_fd); if (fp != NULL) fdrop(fp, td); knote_free(tkn); if (fops != NULL) kqueue_fo_release(filt); return (error); } static int kqueue_acquire(struct file *fp, struct kqueue **kqp) { int error; struct kqueue *kq; error = 0; kq = fp->f_data; if (fp->f_type != DTYPE_KQUEUE || kq == NULL) return (EBADF); *kqp = kq; KQ_LOCK(kq); if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { KQ_UNLOCK(kq); return (EBADF); } kq->kq_refcnt++; KQ_UNLOCK(kq); return error; } static void kqueue_release(struct kqueue *kq, int locked) { if (locked) KQ_OWNED(kq); else KQ_LOCK(kq); kq->kq_refcnt--; if (kq->kq_refcnt == 1) wakeup(&kq->kq_refcnt); if (!locked) KQ_UNLOCK(kq); } static void kqueue_schedtask(struct kqueue *kq) { KQ_OWNED(kq); KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), ("scheduling kqueue task while draining")); if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task); kq->kq_state |= KQ_TASKSCHED; } } /* * Expand the kq to make sure we have storage for fops/ident pair. * * Return 0 on success (or no work necessary), return errno on failure. 
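 *
 * Failure modes: ENOMEM when an M_NOWAIT allocation cannot be
 * satisfied, and EBADF when the kqueue is already closing
 * (KQ_CLOSING).  Callers are expected to check, as kqueue_register()
 * does for non-fd filters:
 *
 *	error = kqueue_expand(kq, fops, kev->ident, mflag);
 *	if (error != 0)
 *		goto done;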
*/ static int kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident, - int waitok) + int mflag) { struct klist *list, *tmp_knhash, *to_free; u_long tmp_knhashmask; - int size; - int fd; - int mflag = waitok ? M_WAITOK : M_NOWAIT; + int error, fd, size; KQ_NOTOWNED(kq); + error = 0; to_free = NULL; if (fops->f_isfd) { fd = ident; if (kq->kq_knlistsize <= fd) { size = kq->kq_knlistsize; while (size <= fd) size += KQEXTENT; list = malloc(size * sizeof(*list), M_KQUEUE, mflag); if (list == NULL) return ENOMEM; KQ_LOCK(kq); - if (kq->kq_knlistsize > fd) { + if ((kq->kq_state & KQ_CLOSING) != 0) { to_free = list; - list = NULL; + error = EBADF; + } else if (kq->kq_knlistsize > fd) { + to_free = list; } else { if (kq->kq_knlist != NULL) { bcopy(kq->kq_knlist, list, kq->kq_knlistsize * sizeof(*list)); to_free = kq->kq_knlist; kq->kq_knlist = NULL; } bzero((caddr_t)list + kq->kq_knlistsize * sizeof(*list), (size - kq->kq_knlistsize) * sizeof(*list)); kq->kq_knlistsize = size; kq->kq_knlist = list; } KQ_UNLOCK(kq); } } else { if (kq->kq_knhashmask == 0) { tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE, - &tmp_knhashmask, - waitok ? HASH_WAITOK : HASH_NOWAIT); + &tmp_knhashmask, (mflag & M_WAITOK) != 0 ? + HASH_WAITOK : HASH_NOWAIT); if (tmp_knhash == NULL) - return ENOMEM; + return (ENOMEM); KQ_LOCK(kq); - if (kq->kq_knhashmask == 0) { + if ((kq->kq_state & KQ_CLOSING) != 0) { + to_free = tmp_knhash; + error = EBADF; + } else if (kq->kq_knhashmask == 0) { kq->kq_knhash = tmp_knhash; kq->kq_knhashmask = tmp_knhashmask; } else { to_free = tmp_knhash; } KQ_UNLOCK(kq); } } free(to_free, M_KQUEUE); KQ_NOTOWNED(kq); - return 0; + return (error); } static void kqueue_task(void *arg, int pending) { struct kqueue *kq; int haskqglobal; haskqglobal = 0; kq = arg; KQ_GLOBAL_LOCK(&kq_global, haskqglobal); KQ_LOCK(kq); KNOTE_LOCKED(&kq->kq_sel.si_note, 0); kq->kq_state &= ~KQ_TASKSCHED; if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) { wakeup(&kq->kq_state); } KQ_UNLOCK(kq); KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); } /* * Scan, update kn_data (if not ONESHOT), and copyout triggered events. * We treat KN_MARKER knotes as if they are in flux. */ static int kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, const struct timespec *tsp, struct kevent *keva, struct thread *td) { struct kevent *kevp; struct knote *kn, *marker; struct knlist *knl; sbintime_t asbt, rsbt; int count, error, haskqglobal, influx, nkev, touch; count = maxevents; nkev = 0; error = 0; haskqglobal = 0; if (maxevents == 0) goto done_nl; rsbt = 0; if (tsp != NULL) { if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000) { error = EINVAL; goto done_nl; } if (timespecisset(tsp)) { if (tsp->tv_sec <= INT32_MAX) { rsbt = tstosbt(*tsp); if (TIMESEL(&asbt, rsbt)) asbt += tc_tick_sbt; if (asbt <= SBT_MAX - rsbt) asbt += rsbt; else asbt = 0; rsbt >>= tc_precexp; } else asbt = 0; } else asbt = -1; } else asbt = 0; - marker = knote_alloc(1); + marker = knote_alloc(M_WAITOK); marker->kn_status = KN_MARKER; KQ_LOCK(kq); retry: kevp = keva; if (kq->kq_count == 0) { if (asbt == -1) { error = EWOULDBLOCK; } else { kq->kq_state |= KQ_SLEEP; error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH, "kqread", asbt, rsbt, C_ABSOLUTE); } if (error == 0) goto retry; /* don't restart after signals... 
*/ if (error == ERESTART) error = EINTR; else if (error == EWOULDBLOCK) error = 0; goto done; } TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); influx = 0; while (count) { KQ_OWNED(kq); kn = TAILQ_FIRST(&kq->kq_head); if ((kn->kn_status == KN_MARKER && kn != marker) || kn_in_flux(kn)) { if (influx) { influx = 0; KQ_FLUX_WAKEUP(kq); } kq->kq_state |= KQ_FLUXWAIT; error = msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); continue; } TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) { kn->kn_status &= ~KN_QUEUED; kq->kq_count--; continue; } if (kn == marker) { KQ_FLUX_WAKEUP(kq); if (count == maxevents) goto retry; goto done; } KASSERT(!kn_in_flux(kn), ("knote %p is unexpectedly in flux", kn)); if ((kn->kn_flags & EV_DROP) == EV_DROP) { kn->kn_status &= ~KN_QUEUED; kn_enter_flux(kn); kq->kq_count--; KQ_UNLOCK(kq); /* * We don't need to lock the list since we've * marked it as in flux. */ knote_drop(kn, td); KQ_LOCK(kq); continue; } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) { kn->kn_status &= ~KN_QUEUED; kn_enter_flux(kn); kq->kq_count--; KQ_UNLOCK(kq); /* * We don't need to lock the list since we've * marked the knote as being in flux. */ *kevp = kn->kn_kevent; knote_drop(kn, td); KQ_LOCK(kq); kn = NULL; } else { kn->kn_status |= KN_SCAN; kn_enter_flux(kn); KQ_UNLOCK(kq); if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE) KQ_GLOBAL_LOCK(&kq_global, haskqglobal); knl = kn_list_lock(kn); if (kn->kn_fop->f_event(kn, 0) == 0) { KQ_LOCK(kq); KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE | KN_SCAN); kn_leave_flux(kn); kq->kq_count--; kn_list_unlock(knl); influx = 1; continue; } touch = (!kn->kn_fop->f_isfd && kn->kn_fop->f_touch != NULL); if (touch) kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS); else *kevp = kn->kn_kevent; KQ_LOCK(kq); KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) { /* * Manually clear knotes who weren't * 'touch'ed. */ if (touch == 0 && kn->kn_flags & EV_CLEAR) { kn->kn_data = 0; kn->kn_fflags = 0; } if (kn->kn_flags & EV_DISPATCH) kn->kn_status |= KN_DISABLED; kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE); kq->kq_count--; } else TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); kn->kn_status &= ~KN_SCAN; kn_leave_flux(kn); kn_list_unlock(knl); influx = 1; } /* we are returning a copy to the user */ kevp++; nkev++; count--; if (nkev == KQ_NEVENTS) { influx = 0; KQ_UNLOCK_FLUX(kq); error = k_ops->k_copyout(k_ops->arg, keva, nkev); nkev = 0; kevp = keva; KQ_LOCK(kq); if (error) break; } } TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe); done: KQ_OWNED(kq); KQ_UNLOCK_FLUX(kq); knote_free(marker); done_nl: KQ_NOTOWNED(kq); if (nkev != 0) error = k_ops->k_copyout(k_ops->arg, keva, nkev); td->td_retval[0] = maxevents - count; return (error); } /*ARGSUSED*/ static int kqueue_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred, struct thread *td) { /* * Enabling sigio causes two major problems: * 1) infinite recursion: * Synopsys: kevent is being used to track signals and have FIOASYNC * set. On receipt of a signal this will cause a kqueue to recurse * into itself over and over. Sending the sigio causes the kqueue * to become ready, which in turn posts sigio again, forever. * Solution: this can be solved by setting a flag in the kqueue that * we have a SIGIO in progress. * 2) locking problems: * Synopsys: Kqueue is a leaf subsystem, but adding signalling puts * us above the proc and pgrp locks. 
* Solution: Post a signal using an async mechanism, being sure to * record a generation count in the delivery so that we do not deliver * a signal to the wrong process. * * Note, these two mechanisms are somewhat mutually exclusive! */ #if 0 struct kqueue *kq; kq = fp->f_data; switch (cmd) { case FIOASYNC: if (*(int *)data) { kq->kq_state |= KQ_ASYNC; } else { kq->kq_state &= ~KQ_ASYNC; } return (0); case FIOSETOWN: return (fsetown(*(int *)data, &kq->kq_sigio)); case FIOGETOWN: *(int *)data = fgetown(&kq->kq_sigio); return (0); } #endif return (ENOTTY); } /*ARGSUSED*/ static int kqueue_poll(struct file *fp, int events, struct ucred *active_cred, struct thread *td) { struct kqueue *kq; int revents = 0; int error; if ((error = kqueue_acquire(fp, &kq))) return POLLERR; KQ_LOCK(kq); if (events & (POLLIN | POLLRDNORM)) { if (kq->kq_count) { revents |= events & (POLLIN | POLLRDNORM); } else { selrecord(td, &kq->kq_sel); if (SEL_WAITING(&kq->kq_sel)) kq->kq_state |= KQ_SEL; } } kqueue_release(kq, 1); KQ_UNLOCK(kq); return (revents); } /*ARGSUSED*/ static int kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred, struct thread *td) { bzero((void *)st, sizeof *st); /* * We no longer return kq_count because the unlocked value is useless. * If you spent all this time getting the count, why not spend your * syscall better by calling kevent? * * XXX - This is needed for libc_r. */ st->st_mode = S_IFIFO; return (0); } static void kqueue_drain(struct kqueue *kq, struct thread *td) { struct knote *kn; int i; KQ_LOCK(kq); KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, ("kqueue already closing")); kq->kq_state |= KQ_CLOSING; if (kq->kq_refcnt > 1) msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); KASSERT(knlist_empty(&kq->kq_sel.si_note), ("kqueue's knlist not empty")); for (i = 0; i < kq->kq_knlistsize; i++) { while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { if (kn_in_flux(kn)) { kq->kq_state |= KQ_FLUXWAIT; msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); continue; } kn_enter_flux(kn); KQ_UNLOCK(kq); knote_drop(kn, td); KQ_LOCK(kq); } } if (kq->kq_knhashmask != 0) { for (i = 0; i <= kq->kq_knhashmask; i++) { while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { if (kn_in_flux(kn)) { kq->kq_state |= KQ_FLUXWAIT; msleep(kq, &kq->kq_lock, PSOCK, "kqclo2", 0); continue; } kn_enter_flux(kn); KQ_UNLOCK(kq); knote_drop(kn, td); KQ_LOCK(kq); } } } if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { kq->kq_state |= KQ_TASKDRAIN; msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); } if ((kq->kq_state & KQ_SEL) == KQ_SEL) { selwakeuppri(&kq->kq_sel, PSOCK); if (!SEL_WAITING(&kq->kq_sel)) kq->kq_state &= ~KQ_SEL; } KQ_UNLOCK(kq); } static void kqueue_destroy(struct kqueue *kq) { KASSERT(kq->kq_fdp == NULL, ("kqueue still attached to a file descriptor")); seldrain(&kq->kq_sel); knlist_destroy(&kq->kq_sel.si_note); mtx_destroy(&kq->kq_lock); if (kq->kq_knhash != NULL) free(kq->kq_knhash, M_KQUEUE); if (kq->kq_knlist != NULL) free(kq->kq_knlist, M_KQUEUE); funsetown(&kq->kq_sigio); } /*ARGSUSED*/ static int kqueue_close(struct file *fp, struct thread *td) { struct kqueue *kq = fp->f_data; struct filedesc *fdp; int error; int filedesc_unlock; if ((error = kqueue_acquire(fp, &kq))) return error; kqueue_drain(kq, td); /* * We could be called due to the knote_drop() doing fdrop(), * called from kqueue_register(). 
In this case the global * lock is owned, and filedesc sx is locked before, to not * take the sleepable lock after non-sleepable. */ fdp = kq->kq_fdp; kq->kq_fdp = NULL; if (!sx_xlocked(FILEDESC_LOCK(fdp))) { FILEDESC_XLOCK(fdp); filedesc_unlock = 1; } else filedesc_unlock = 0; TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list); if (filedesc_unlock) FILEDESC_XUNLOCK(fdp); kqueue_destroy(kq); chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0); crfree(kq->kq_cred); free(kq, M_KQUEUE); fp->f_data = NULL; return (0); } static int kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { kif->kf_type = KF_TYPE_KQUEUE; return (0); } static void kqueue_wakeup(struct kqueue *kq) { KQ_OWNED(kq); if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { kq->kq_state &= ~KQ_SLEEP; wakeup(kq); } if ((kq->kq_state & KQ_SEL) == KQ_SEL) { selwakeuppri(&kq->kq_sel, PSOCK); if (!SEL_WAITING(&kq->kq_sel)) kq->kq_state &= ~KQ_SEL; } if (!knlist_empty(&kq->kq_sel.si_note)) kqueue_schedtask(kq); if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { pgsigio(&kq->kq_sigio, SIGIO, 0); } } /* * Walk down a list of knotes, activating them if their event has triggered. * * There is a possibility to optimize in the case of one kq watching another. * Instead of scheduling a task to wake it up, you could pass enough state * down the chain to make up the parent kqueue. Make this code functional * first. */ void knote(struct knlist *list, long hint, int lockflags) { struct kqueue *kq; struct knote *kn, *tkn; int error; if (list == NULL) return; KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); if ((lockflags & KNF_LISTLOCKED) == 0) list->kl_lock(list->kl_lockarg); /* * If we unlock the list lock (and enter influx), we can * eliminate the kqueue scheduling, but this will introduce * four lock/unlock's for each knote to test. Also, marker * would be needed to keep iteration position, since filters * or other threads could remove events. */ SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) { kq = kn->kn_kq; KQ_LOCK(kq); if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { /* * Do not process the influx notes, except for * the influx coming from the kq unlock in the * kqueue_scan(). In the later case, we do * not interfere with the scan, since the code * fragment in kqueue_scan() locks the knlist, * and cannot proceed until we finished. */ KQ_UNLOCK(kq); } else if ((lockflags & KNF_NOKQLOCK) != 0) { kn_enter_flux(kn); KQ_UNLOCK(kq); error = kn->kn_fop->f_event(kn, hint); KQ_LOCK(kq); kn_leave_flux(kn); if (error) KNOTE_ACTIVATE(kn, 1); KQ_UNLOCK_FLUX(kq); } else { if (kn->kn_fop->f_event(kn, hint)) KNOTE_ACTIVATE(kn, 1); KQ_UNLOCK(kq); } } if ((lockflags & KNF_LISTLOCKED) == 0) list->kl_unlock(list->kl_lockarg); } /* * add a knote to a knlist */ void knlist_add(struct knlist *knl, struct knote *kn, int islocked) { KNL_ASSERT_LOCK(knl, islocked); KQ_NOTOWNED(kn->kn_kq); KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn)); KASSERT((kn->kn_status & KN_DETACHED) != 0, ("knote %p was not detached", kn)); if (!islocked) knl->kl_lock(knl->kl_lockarg); SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); if (!islocked) knl->kl_unlock(knl->kl_lockarg); KQ_LOCK(kn->kn_kq); kn->kn_knlist = knl; kn->kn_status &= ~KN_DETACHED; KQ_UNLOCK(kn->kn_kq); } static void knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked) { KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked")); KNL_ASSERT_LOCK(knl, knlislocked); mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? 
MA_OWNED : MA_NOTOWNED); KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn)); KASSERT((kn->kn_status & KN_DETACHED) == 0, ("knote %p was already detached", kn)); if (!knlislocked) knl->kl_lock(knl->kl_lockarg); SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); kn->kn_knlist = NULL; if (!knlislocked) kn_list_unlock(knl); if (!kqislocked) KQ_LOCK(kn->kn_kq); kn->kn_status |= KN_DETACHED; if (!kqislocked) KQ_UNLOCK(kn->kn_kq); } /* * remove knote from the specified knlist */ void knlist_remove(struct knlist *knl, struct knote *kn, int islocked) { knlist_remove_kq(knl, kn, islocked, 0); } int knlist_empty(struct knlist *knl) { KNL_ASSERT_LOCKED(knl); return (SLIST_EMPTY(&knl->kl_list)); } static struct mtx knlist_lock; MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", MTX_DEF); static void knlist_mtx_lock(void *arg); static void knlist_mtx_unlock(void *arg); static void knlist_mtx_lock(void *arg) { mtx_lock((struct mtx *)arg); } static void knlist_mtx_unlock(void *arg) { mtx_unlock((struct mtx *)arg); } static void knlist_mtx_assert_locked(void *arg) { mtx_assert((struct mtx *)arg, MA_OWNED); } static void knlist_mtx_assert_unlocked(void *arg) { mtx_assert((struct mtx *)arg, MA_NOTOWNED); } static void knlist_rw_rlock(void *arg) { rw_rlock((struct rwlock *)arg); } static void knlist_rw_runlock(void *arg) { rw_runlock((struct rwlock *)arg); } static void knlist_rw_assert_locked(void *arg) { rw_assert((struct rwlock *)arg, RA_LOCKED); } static void knlist_rw_assert_unlocked(void *arg) { rw_assert((struct rwlock *)arg, RA_UNLOCKED); } void knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), void (*kl_unlock)(void *), void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *)) { if (lock == NULL) knl->kl_lockarg = &knlist_lock; else knl->kl_lockarg = lock; if (kl_lock == NULL) knl->kl_lock = knlist_mtx_lock; else knl->kl_lock = kl_lock; if (kl_unlock == NULL) knl->kl_unlock = knlist_mtx_unlock; else knl->kl_unlock = kl_unlock; if (kl_assert_locked == NULL) knl->kl_assert_locked = knlist_mtx_assert_locked; else knl->kl_assert_locked = kl_assert_locked; if (kl_assert_unlocked == NULL) knl->kl_assert_unlocked = knlist_mtx_assert_unlocked; else knl->kl_assert_unlocked = kl_assert_unlocked; knl->kl_autodestroy = 0; SLIST_INIT(&knl->kl_list); } void knlist_init_mtx(struct knlist *knl, struct mtx *lock) { knlist_init(knl, lock, NULL, NULL, NULL, NULL); } struct knlist * knlist_alloc(struct mtx *lock) { struct knlist *knl; knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK); knlist_init_mtx(knl, lock); return (knl); } void knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock) { knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock, knlist_rw_assert_locked, knlist_rw_assert_unlocked); } void knlist_destroy(struct knlist *knl) { KASSERT(KNLIST_EMPTY(knl), ("destroying knlist %p with knotes on it", knl)); } void knlist_detach(struct knlist *knl) { KNL_ASSERT_LOCKED(knl); knl->kl_autodestroy = 1; if (knlist_empty(knl)) { knlist_destroy(knl); free(knl, M_KQUEUE); } } /* * Even if we are locked, we may need to drop the lock to allow any influx * knotes time to "settle". 
*/ void knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn) { struct knote *kn, *kn2; struct kqueue *kq; KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl)); if (islocked) KNL_ASSERT_LOCKED(knl); else { KNL_ASSERT_UNLOCKED(knl); again: /* need to reacquire lock since we have dropped it */ knl->kl_lock(knl->kl_lockarg); } SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) { kq = kn->kn_kq; KQ_LOCK(kq); if (kn_in_flux(kn)) { KQ_UNLOCK(kq); continue; } knlist_remove_kq(knl, kn, 1, 1); if (killkn) { kn_enter_flux(kn); KQ_UNLOCK(kq); knote_drop_detached(kn, td); } else { /* Make sure cleared knotes disappear soon */ kn->kn_flags |= EV_EOF | EV_ONESHOT; KQ_UNLOCK(kq); } kq = NULL; } if (!SLIST_EMPTY(&knl->kl_list)) { /* there are still in flux knotes remaining */ kn = SLIST_FIRST(&knl->kl_list); kq = kn->kn_kq; KQ_LOCK(kq); KASSERT(kn_in_flux(kn), ("knote removed w/o list lock")); knl->kl_unlock(knl->kl_lockarg); kq->kq_state |= KQ_FLUXWAIT; msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0); kq = NULL; goto again; } if (islocked) KNL_ASSERT_LOCKED(knl); else { knl->kl_unlock(knl->kl_lockarg); KNL_ASSERT_UNLOCKED(knl); } } /* * Remove all knotes referencing a specified fd must be called with FILEDESC * lock. This prevents a race where a new fd comes along and occupies the * entry and we attach a knote to the fd. */ void knote_fdclose(struct thread *td, int fd) { struct filedesc *fdp = td->td_proc->p_fd; struct kqueue *kq; struct knote *kn; int influx; FILEDESC_XLOCK_ASSERT(fdp); /* * We shouldn't have to worry about new kevents appearing on fd * since filedesc is locked. */ TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) { KQ_LOCK(kq); again: influx = 0; while (kq->kq_knlistsize > fd && (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) { if (kn_in_flux(kn)) { /* someone else might be waiting on our knote */ if (influx) wakeup(kq); kq->kq_state |= KQ_FLUXWAIT; msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); goto again; } kn_enter_flux(kn); KQ_UNLOCK(kq); influx = 1; knote_drop(kn, td); KQ_LOCK(kq); } KQ_UNLOCK_FLUX(kq); } } static int knote_attach(struct knote *kn, struct kqueue *kq) { struct klist *list; KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn)); KQ_OWNED(kq); + if ((kq->kq_state & KQ_CLOSING) != 0) + return (EBADF); if (kn->kn_fop->f_isfd) { if (kn->kn_id >= kq->kq_knlistsize) return (ENOMEM); list = &kq->kq_knlist[kn->kn_id]; } else { if (kq->kq_knhash == NULL) return (ENOMEM); list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; } SLIST_INSERT_HEAD(list, kn, kn_link); return (0); } static void knote_drop(struct knote *kn, struct thread *td) { if ((kn->kn_status & KN_DETACHED) == 0) kn->kn_fop->f_detach(kn); knote_drop_detached(kn, td); } static void knote_drop_detached(struct knote *kn, struct thread *td) { struct kqueue *kq; struct klist *list; kq = kn->kn_kq; KASSERT((kn->kn_status & KN_DETACHED) != 0, ("knote %p still attached", kn)); KQ_NOTOWNED(kq); KQ_LOCK(kq); KASSERT(kn->kn_influx == 1, ("knote_drop called on %p with influx %d", kn, kn->kn_influx)); if (kn->kn_fop->f_isfd) list = &kq->kq_knlist[kn->kn_id]; else list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; if (!SLIST_EMPTY(list)) SLIST_REMOVE(list, kn, knote, kn_link); if (kn->kn_status & KN_QUEUED) knote_dequeue(kn); KQ_UNLOCK_FLUX(kq); if (kn->kn_fop->f_isfd) { fdrop(kn->kn_fp, td); kn->kn_fp = NULL; } kqueue_fo_release(kn->kn_kevent.filter); kn->kn_fop = NULL; knote_free(kn); } static void knote_enqueue(struct knote *kn) { struct kqueue *kq = 
kn->kn_kq; KQ_OWNED(kn->kn_kq); KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); kn->kn_status |= KN_QUEUED; kq->kq_count++; kqueue_wakeup(kq); } static void knote_dequeue(struct knote *kn) { struct kqueue *kq = kn->kn_kq; KQ_OWNED(kn->kn_kq); KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); kn->kn_status &= ~KN_QUEUED; kq->kq_count--; } static void knote_init(void) { knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); } SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); static struct knote * -knote_alloc(int waitok) +knote_alloc(int mflag) { - return (uma_zalloc(knote_zone, (waitok ? M_WAITOK : M_NOWAIT) | - M_ZERO)); + return (uma_zalloc(knote_zone, mflag | M_ZERO)); } static void knote_free(struct knote *kn) { uma_zfree(knote_zone, kn); } /* * Register the kev w/ the kq specified by fd. */ int -kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok) +kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag) { struct kqueue *kq; struct file *fp; cap_rights_t rights; int error; error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp); if (error != 0) return (error); if ((error = kqueue_acquire(fp, &kq)) != 0) goto noacquire; - error = kqueue_register(kq, kev, td, waitok); + error = kqueue_register(kq, kev, td, mflag); kqueue_release(kq, 0); noacquire: fdrop(fp, td); return (error); } Index: projects/clang700-import/sys/kern/subr_blist.c =================================================================== --- projects/clang700-import/sys/kern/subr_blist.c (revision 340917) +++ projects/clang700-import/sys/kern/subr_blist.c (revision 340918) @@ -1,1144 +1,1146 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1998 Matthew Dillon. All Rights Reserved. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * BLIST.C - Bitmap allocator/deallocator, using a radix tree with hinting * * This module implements a general bitmap allocator/deallocator. The * allocator eats around 2 bits per 'block'. 
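 *
 * A minimal usage sketch (the calls are the public blist API declared
 * in sys/blist.h; the sizes are arbitrary):
 *
 *	blist_t bl = blist_create(2048, M_WAITOK);
 *	daddr_t blk = blist_alloc(bl, 16);	(16 contiguous blocks)
 *	if (blk != SWAPBLK_NONE)
 *		blist_free(bl, blk, 16);
 *	blist_destroy(bl);
 *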
 * The module does not try to interpret the meaning of a 'block' other
 * than to return SWAPBLK_NONE on an allocation failure.
 *
 * A radix tree controls access to pieces of the bitmap, and includes
 * auxiliary information at each interior node about the availability of
 * contiguous free blocks in the subtree rooted at that node.  Two radix
 * constants are involved: one for the size of the bitmaps contained in the
 * leaf nodes (BLIST_BMAP_RADIX), and one for the number of descendants of
 * each of the meta (interior) nodes (BLIST_META_RADIX).  Each subtree is
 * associated with a range of blocks.  The root of any subtree stores a
 * hint field that defines an upper bound on the size of the largest
 * allocation that can begin in the associated block range.  A hint is an
 * upper bound on a potential allocation, but not necessarily a tight upper
 * bound.
 *
 * The bitmap field in each node directs the search for available blocks.
 * For a leaf node, a bit is set if the corresponding block is free.  For a
 * meta node, a bit is set if the corresponding subtree contains a free
 * block somewhere within it.  The search at a meta node considers only
 * children of that node that represent a range that includes a free block.
 *
 * The hinting greatly increases code efficiency for allocations while
 * the general radix structure optimizes both allocations and frees.  The
 * radix tree should be able to operate well no matter how much
 * fragmentation there is and no matter how large a bitmap is used.
 *
 * The blist code wires all necessary memory at creation time.  Neither
 * allocations nor frees require interaction with the memory subsystem.
 * The non-blocking nature of allocations and frees is required by swap
 * code (vm/swap_pager.c).
 *
 * LAYOUT: The radix tree is laid out recursively using a linear array.
 *	Each meta node is immediately followed (laid out sequentially in
 *	memory) by BLIST_META_RADIX lower level nodes.  This is a recursive
 *	structure but one that can be easily scanned through a very simple
 *	'skip' calculation.  The memory allocation is only large enough to
 *	cover the number of blocks requested at creation time.  Nodes that
 *	represent blocks beyond that limit, nodes that would never be read
 *	or written, are not allocated, so that the last of the
 *	BLIST_META_RADIX lower level nodes of some nodes may not be
 *	allocated.
 *
 * NOTE: the allocator cannot currently allocate more than
 *	BLIST_BMAP_RADIX blocks per call.  It will panic with 'allocation
 *	too large' if you try.  This is an area that could use improvement.
 *	The radix is large enough that this restriction does not affect the
 *	swap system, though.  Currently only the allocation code is affected
 *	by this algorithmic unfeature.  The freeing code can handle
 *	arbitrary ranges.
 *
 * This code can be compiled stand-alone for debugging.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef _KERNEL

#include
#include
#include
#include
#include
#include
#include
#include
#include

#else

#ifndef BLIST_NO_DEBUG
#define BLIST_DEBUG
#endif

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define	bitcount64(x)	__bitcount64((uint64_t)(x))
#define malloc(a,b,c)	calloc(a, 1)
#define free(a,b)	free(a)
#define ummin(a,b)	((a) < (b) ?
(a) : (b)) #include void panic(const char *ctl, ...); #endif /* * static support functions */ static daddr_t blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count); static daddr_t blst_meta_alloc(blmeta_t *scan, daddr_t cursor, daddr_t count, u_daddr_t radix); static void blst_leaf_free(blmeta_t *scan, daddr_t relblk, int count); static void blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, u_daddr_t radix); static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, blist_t dest, daddr_t count); static daddr_t blst_leaf_fill(blmeta_t *scan, daddr_t blk, int count); static daddr_t blst_meta_fill(blmeta_t *scan, daddr_t allocBlk, daddr_t count, u_daddr_t radix); #ifndef _KERNEL static void blst_radix_print(blmeta_t *scan, daddr_t blk, daddr_t radix, int tab); #endif #ifdef _KERNEL static MALLOC_DEFINE(M_SWAP, "SWAP", "Swap space"); #endif _Static_assert(BLIST_BMAP_RADIX % BLIST_META_RADIX == 0, "radix divisibility error"); #define BLIST_BMAP_MASK (BLIST_BMAP_RADIX - 1) #define BLIST_META_MASK (BLIST_META_RADIX - 1) /* * For a subtree that can represent the state of up to 'radix' blocks, the * number of leaf nodes of the subtree is L=radix/BLIST_BMAP_RADIX. If 'm' * is short for BLIST_META_RADIX, then for a tree of height h with L=m**h * leaf nodes, the total number of tree nodes is 1 + m + m**2 + ... + m**h, * or, equivalently, (m**(h+1)-1)/(m-1). This quantity is called 'skip' * in the 'meta' functions that process subtrees. Since integer division * discards remainders, we can express this computation as * skip = (m * m**h) / (m - 1) * skip = (m * (radix / BLIST_BMAP_RADIX)) / (m - 1) * and since m divides BLIST_BMAP_RADIX, we can simplify further to * skip = (radix / (BLIST_BMAP_RADIX / m)) / (m - 1) * skip = radix / ((BLIST_BMAP_RADIX / m) * (m - 1)) * so that simple integer division by a constant can safely be used for the * calculation. */ static inline daddr_t radix_to_skip(daddr_t radix) { return (radix / ((BLIST_BMAP_RADIX / BLIST_META_RADIX) * BLIST_META_MASK)); } /* * Provide a mask with count bits set, starting as position n. */ static inline u_daddr_t bitrange(int n, int count) { return (((u_daddr_t)-1 << n) & ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - (n + count)))); } /* * Use binary search, or a faster method, to find the 1 bit in a u_daddr_t. * Assumes that the argument has only one bit set. */ static inline int bitpos(u_daddr_t mask) { int hi, lo, mid; switch (sizeof(mask)) { #ifdef HAVE_INLINE_FFSLL case sizeof(long long): return (ffsll(mask) - 1); #endif default: lo = 0; hi = BLIST_BMAP_RADIX; while (lo + 1 < hi) { mid = (lo + hi) >> 1; if ((mask >> mid) != 0) lo = mid; else hi = mid; } return (lo); } } /* * blist_create() - create a blist capable of handling up to the specified * number of blocks * * blocks - must be greater than 0 * flags - malloc flags * * The smallest blist consists of a single leaf node capable of * managing BLIST_BMAP_RADIX blocks. */ blist_t blist_create(daddr_t blocks, int flags) { blist_t bl; u_daddr_t nodes, radix; if (blocks == 0) panic("invalid block count"); /* * Calculate the radix and node count used for scanning. 
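	 * A worked example (assuming the usual BLIST_BMAP_RADIX == 64 and
	 * BLIST_META_RADIX == 16): for blocks == 2048 the loop below runs
	 * twice,
	 *
	 *	radix = 64:   nodes += 1 + 2047/64   -> nodes = 33
	 *	radix = 1024: nodes += 1 + 2047/1024 -> nodes = 35
	 *
	 * and stops with radix == 16384, the smallest such power that
	 * exceeds the requested block count.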
	 */
	nodes = 1;
	radix = BLIST_BMAP_RADIX;
	while (radix <= blocks) {
		nodes += 1 + (blocks - 1) / radix;
		radix *= BLIST_META_RADIX;
	}

	bl = malloc(offsetof(struct blist, bl_root[nodes]), M_SWAP, flags |
	    M_ZERO);
	if (bl == NULL)
		return (NULL);

	bl->bl_blocks = blocks;
	bl->bl_radix = radix;

#if defined(BLIST_DEBUG)
	printf(
		"BLIST representing %lld blocks (%lld MB of swap)"
		", requiring %lldK of ram\n",
		(long long)bl->bl_blocks,
		(long long)bl->bl_blocks * 4 / 1024,
		(long long)(nodes * sizeof(blmeta_t) + 1023) / 1024
	);
	printf("BLIST raw radix tree contains %lld records\n",
	    (long long)nodes);
#endif

	return (bl);
}

void
blist_destroy(blist_t bl)
{

	free(bl, M_SWAP);
}

/*
 * blist_alloc() -	reserve space in the block bitmap.  Return the base
 *			of a contiguous region or SWAPBLK_NONE if space could
 *			not be allocated.
 */
daddr_t
blist_alloc(blist_t bl, daddr_t count)
{
	daddr_t blk;

	if (count > BLIST_MAX_ALLOC)
		panic("allocation too large");

	/*
	 * This loop iterates at most twice.  An allocation failure in the
	 * first iteration leads to a second iteration only if the cursor was
	 * non-zero.  When the cursor is zero, an allocation failure will
	 * reduce the hint, stopping further iterations.
	 */
	while (count <= bl->bl_root->bm_bighint) {
		blk = blst_meta_alloc(bl->bl_root, bl->bl_cursor, count,
		    bl->bl_radix);
		if (blk != SWAPBLK_NONE) {
			bl->bl_avail -= count;
			bl->bl_cursor = blk + count;
			if (bl->bl_cursor == bl->bl_blocks)
				bl->bl_cursor = 0;
			return (blk);
		}
		bl->bl_cursor = 0;
	}
	return (SWAPBLK_NONE);
}

/*
 * blist_avail() -	return the number of free blocks.
 */
daddr_t
blist_avail(blist_t bl)
{

	return (bl->bl_avail);
}

/*
 * blist_free() -	free up space in the block bitmap.  Return the base
 *			of a contiguous region.  Panic if an inconsistency is
 *			found.
 */
void
blist_free(blist_t bl, daddr_t blkno, daddr_t count)
{

	if (blkno < 0 || blkno + count > bl->bl_blocks)
		panic("freeing invalid range");
	blst_meta_free(bl->bl_root, blkno, count, bl->bl_radix);
	bl->bl_avail += count;
}

/*
 * blist_fill() -	mark a region in the block bitmap as off-limits
 *			to the allocator (i.e. allocate it), ignoring any
 *			existing allocations.  Return the number of blocks
 *			actually filled that were free before the call.
 */
daddr_t
blist_fill(blist_t bl, daddr_t blkno, daddr_t count)
{
	daddr_t filled;

	if (blkno < 0 || blkno + count > bl->bl_blocks)
		panic("filling invalid range");
	filled = blst_meta_fill(bl->bl_root, blkno, count, bl->bl_radix);
	bl->bl_avail -= filled;
	return (filled);
}

/*
 * blist_resize() -	resize an existing radix tree to handle the
 *			specified number of blocks.  This will reallocate
 *			the tree and transfer the previous bitmap to the new
 *			one.  When extending the tree you can specify whether
 *			the new blocks are to be left allocated or freed.
 */
void
blist_resize(blist_t *pbl, daddr_t count, int freenew, int flags)
{
	blist_t newbl = blist_create(count, flags);
	blist_t save = *pbl;

	*pbl = newbl;
	if (count > save->bl_blocks)
		count = save->bl_blocks;
	blst_copy(save->bl_root, 0, save->bl_radix, newbl, count);

	/*
	 * If resizing upwards, should we free the new space or not?
	 */
*/ if (freenew && count < newbl->bl_blocks) { blist_free(newbl, count, newbl->bl_blocks - count); } blist_destroy(save); } #ifdef BLIST_DEBUG /* * blist_print() - dump radix tree */ void blist_print(blist_t bl) { printf("BLIST avail = %jd, cursor = %08jx {\n", (uintmax_t)bl->bl_avail, (uintmax_t)bl->bl_cursor); if (bl->bl_root->bm_bitmap != 0) blst_radix_print(bl->bl_root, 0, bl->bl_radix, 4); printf("}\n"); } #endif static const u_daddr_t fib[] = { 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181, 6765, 10946, 17711, 28657, 46368, 75025, 121393, 196418, 317811, 514229, 832040, 1346269, 2178309, 3524578, }; /* * Use 'gap' to describe a maximal range of unallocated blocks/bits. */ struct gap_stats { daddr_t start; /* current gap start, or SWAPBLK_NONE */ daddr_t num; /* number of gaps observed */ daddr_t max; /* largest gap size */ daddr_t avg; /* average gap size */ daddr_t err; /* sum - num * avg */ daddr_t histo[nitems(fib)]; /* # gaps in each size range */ int max_bucket; /* last histo elt with nonzero val */ }; /* * gap_stats_counting() - is the state 'counting 1 bits'? * or 'skipping 0 bits'? */ static inline bool gap_stats_counting(const struct gap_stats *stats) { return (stats->start != SWAPBLK_NONE); } /* * init_gap_stats() - initialize stats on gap sizes */ static inline void init_gap_stats(struct gap_stats *stats) { bzero(stats, sizeof(*stats)); stats->start = SWAPBLK_NONE; } /* * update_gap_stats() - update stats on gap sizes */ static void update_gap_stats(struct gap_stats *stats, daddr_t posn) { daddr_t size; int hi, lo, mid; if (!gap_stats_counting(stats)) { stats->start = posn; return; } size = posn - stats->start; stats->start = SWAPBLK_NONE; if (size > stats->max) stats->max = size; /* * Find the fibonacci range that contains size, * expecting to find it in an early range. 
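	 * This is a galloping search: double 'hi' until fib[hi] exceeds
	 * size, then binary search the last doubled interval.  E.g., a gap
	 * of size 6 lands at lo == 3, since fib[3] == 5 <= 6 < 8 == fib[4],
	 * so histo[3] is bumped and later prints as the "5 to 7" bucket.
	 */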
*/ lo = 0; hi = 1; while (hi < nitems(fib) && fib[hi] <= size) { lo = hi; hi *= 2; } if (hi >= nitems(fib)) hi = nitems(fib); while (lo + 1 != hi) { mid = (lo + hi) >> 1; if (fib[mid] <= size) lo = mid; else hi = mid; } stats->histo[lo]++; if (lo > stats->max_bucket) stats->max_bucket = lo; stats->err += size - stats->avg; stats->num++; stats->avg += stats->err / stats->num; stats->err %= stats->num; } /* * dump_gap_stats() - print stats on gap sizes */ static inline void dump_gap_stats(const struct gap_stats *stats, struct sbuf *s) { int i; sbuf_printf(s, "number of maximal free ranges: %jd\n", (intmax_t)stats->num); sbuf_printf(s, "largest free range: %jd\n", (intmax_t)stats->max); sbuf_printf(s, "average maximal free range size: %jd\n", (intmax_t)stats->avg); sbuf_printf(s, "number of maximal free ranges of different sizes:\n"); sbuf_printf(s, " count | size range\n"); sbuf_printf(s, " ----- | ----------\n"); for (i = 0; i < stats->max_bucket; i++) { if (stats->histo[i] != 0) { sbuf_printf(s, "%20jd | ", (intmax_t)stats->histo[i]); if (fib[i] != fib[i + 1] - 1) sbuf_printf(s, "%jd to %jd\n", (intmax_t)fib[i], (intmax_t)fib[i + 1] - 1); else sbuf_printf(s, "%jd\n", (intmax_t)fib[i]); } } sbuf_printf(s, "%20jd | ", (intmax_t)stats->histo[i]); if (stats->histo[i] > 1) sbuf_printf(s, "%jd to %jd\n", (intmax_t)fib[i], (intmax_t)stats->max); else sbuf_printf(s, "%jd\n", (intmax_t)stats->max); } /* * blist_stats() - dump radix tree stats */ void blist_stats(blist_t bl, struct sbuf *s) { struct gap_stats gstats; struct gap_stats *stats = &gstats; daddr_t i, nodes, radix; u_daddr_t bit, diff, mask; init_gap_stats(stats); nodes = 0; i = bl->bl_radix; while (i < bl->bl_radix + bl->bl_blocks) { /* * Find max size subtree starting at i. */ radix = BLIST_BMAP_RADIX; while (((i / radix) & BLIST_META_MASK) == 0) radix *= BLIST_META_RADIX; /* * Check for skippable subtrees starting at i. */ while (radix > BLIST_BMAP_RADIX) { if (bl->bl_root[nodes].bm_bitmap == 0) { if (gap_stats_counting(stats)) update_gap_stats(stats, i); break; } /* * Skip subtree root. */ nodes++; radix /= BLIST_META_RADIX; } if (radix == BLIST_BMAP_RADIX) { /* * Scan leaf. */ mask = bl->bl_root[nodes].bm_bitmap; diff = mask ^ (mask << 1); if (gap_stats_counting(stats)) diff ^= 1; while (diff != 0) { bit = diff & -diff; update_gap_stats(stats, i + bitpos(bit)); diff ^= bit; } } nodes += radix_to_skip(radix); i += radix; } update_gap_stats(stats, i); dump_gap_stats(stats, s); } /************************************************************************ * ALLOCATION SUPPORT FUNCTIONS * ************************************************************************ * * These support functions do all the actual work. They may seem * rather longish, but that's because I've commented them up. The * actual code is straight forward. * */ /* * BLST_NEXT_LEAF_ALLOC() - allocate the first few blocks in the next leaf. * * 'scan' is a leaf node, associated with a block containing 'blk'. * The next leaf node could be adjacent, or several nodes away if the * least common ancestor of 'scan' and its neighbor is several levels * up. Use 'blk' to determine how many meta-nodes lie between the * leaves. If the next leaf has enough initial bits set, clear them * and clear the bits in the meta nodes on the path up to the least * common ancestor to mark any subtrees made completely empty. 
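 *
 * For example (with BLIST_BMAP_RADIX == 64 assumed): a 4-block request
 * that finds only bits 62 and 63 free in 'scan' arrives here with
 * count == 2, and succeeds only if the next leaf has bits 0 and 1 free.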
 */
static int
blst_next_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
{
	blmeta_t *next;
	daddr_t skip;
	u_daddr_t radix;
	int digit;

	next = scan + 1;
	blk += BLIST_BMAP_RADIX;
	radix = BLIST_BMAP_RADIX;
	while ((digit = ((blk / radix) & BLIST_META_MASK)) == 0 &&
	    (next->bm_bitmap & 1) == 1) {
		next++;
		radix *= BLIST_META_RADIX;
	}
	if (((next->bm_bitmap + 1) & ~((u_daddr_t)-1 << count)) != 0) {
		/*
		 * The next leaf doesn't have enough free blocks at the
		 * beginning to complete the spanning allocation.
		 */
		return (ENOMEM);
	}
	/* Clear the first 'count' bits in the next leaf to allocate. */
	next->bm_bitmap &= (u_daddr_t)-1 << count;

	/*
	 * Update bitmaps of next-ancestors, up to least common ancestor.
	 */
	skip = radix_to_skip(radix);
	while (radix != BLIST_BMAP_RADIX && next->bm_bitmap == 0) {
		(--next)->bm_bitmap ^= 1;
		radix /= BLIST_META_RADIX;
	}
	if (next->bm_bitmap == 0)
		scan[-digit * skip].bm_bitmap ^= (u_daddr_t)1 << digit;
	return (0);
}

/*
 * BLST_LEAF_ALLOC() - allocate at a leaf in the radix tree (a bitmap).
 *
 * This is the core of the allocator and is optimized for the
 * BLIST_BMAP_RADIX block allocation case.  Otherwise, execution
 * time is proportional to log2(count) + bitpos time.
 */
static daddr_t
blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
{
	u_daddr_t mask;
	int count1, hi, lo, num_shifts, range1, range_ext;

	range1 = 0;
	count1 = count - 1;
	num_shifts = fls(count1);
	mask = scan->bm_bitmap;
	while ((-mask & ~mask) != 0 && num_shifts > 0) {
		/*
		 * If bit i is set in mask, then bits in [i, i + range1] are
		 * set in scan->bm_bitmap.  The value of range1 is equal to
		 * count1 >> num_shifts.  Grow range1 and reduce num_shifts
		 * to 0, while preserving these invariants.  The updates to
		 * mask leave fewer bits set, but each bit that remains set
		 * represents a longer string of consecutive bits set in
		 * scan->bm_bitmap.  If more updates to mask cannot clear more
		 * bits, because mask is partitioned with all 0 bits preceding
		 * all 1 bits, the loop terminates immediately.
		 *
		 * (Editor's worked example: with count == 4, so count1 == 3
		 * and num_shifts == 2, the loop applies mask &= mask >> 1 and
		 * then mask &= mask >> 2; any bit that survives both updates
		 * marks the start of four consecutive set bits.)
		 */
		num_shifts--;
		range_ext = range1 + ((count1 >> num_shifts) & 1);
		/*
		 * mask is a signed quantity for the shift because when it is
		 * shifted right, the sign bit should be copied; when the last
		 * block of the leaf is free, pretend, for a while, that all
		 * the blocks that follow it are also free.
		 */
		mask &= (daddr_t)mask >> range_ext;
		range1 += range_ext;
	}
	if (mask == 0) {
		/*
		 * Update bighint.  There is no allocation bigger than range1
		 * starting in this leaf.
		 */
		scan->bm_bighint = range1;
		return (SWAPBLK_NONE);
	}

	/* Discard any candidates that appear before blk. */
	mask &= (u_daddr_t)-1 << (blk & BLIST_BMAP_MASK);
	if (mask == 0)
		return (SWAPBLK_NONE);

	/*
	 * The least significant set bit in mask marks the start of the first
	 * available range of sufficient size.  Clear all the bits but that
	 * one, and then find its position.
	 */
	mask &= -mask;
	lo = bitpos(mask);

	hi = lo + count;
	if (hi > BLIST_BMAP_RADIX) {
		/*
		 * An allocation within this leaf is impossible, so a
		 * successful allocation depends on the next leaf providing
		 * some of the blocks.
		 */
		if (blst_next_leaf_alloc(scan, blk, hi - BLIST_BMAP_RADIX) != 0)
			/*
			 * The hint cannot be updated, because the same
			 * allocation request could be satisfied later, by this
			 * leaf, if the state of the next leaf changes, and
			 * without any changes to this leaf.
			 */
			return (SWAPBLK_NONE);
		hi = BLIST_BMAP_RADIX;
	}

	/* Set the bits of mask at position 'lo' and higher. */
	mask = -mask;
	if (hi == BLIST_BMAP_RADIX) {
		/*
		 * Update bighint.
		 * There is no allocation bigger than range1 available in
		 * this leaf after this allocation completes.
		 */
		scan->bm_bighint = range1;
	} else {
		/* Clear the bits of mask at position 'hi' and higher. */
		mask &= (u_daddr_t)-1 >> (BLIST_BMAP_RADIX - hi);
	}
	/* Clear the allocated bits from this leaf. */
	scan->bm_bitmap &= ~mask;
	return ((blk & ~BLIST_BMAP_MASK) + lo);
}

/*
 * blst_meta_alloc() - allocate at a meta node in the radix tree.
 *
 * Attempt to allocate at a meta node.  If we can't, we update
 * bighint and return a failure.  Updating bighint optimizes future
 * calls that hit this node.  We have to check for our collapse cases
 * and we have a few optimizations strewn in as well.
 */
static daddr_t
blst_meta_alloc(blmeta_t *scan, daddr_t cursor, daddr_t count, u_daddr_t radix)
{
	daddr_t blk, i, r, skip;
	u_daddr_t bit, mask;
	bool scan_from_start;
	int digit;

	if (radix == BLIST_BMAP_RADIX)
		return (blst_leaf_alloc(scan, cursor, count));
	blk = cursor & -radix;
	scan_from_start = (cursor == blk);
	radix /= BLIST_META_RADIX;
	skip = radix_to_skip(radix);
	mask = scan->bm_bitmap;

	/* Discard any candidates that appear before cursor. */
	digit = (cursor / radix) & BLIST_META_MASK;
	mask &= (u_daddr_t)-1 << digit;
+	if (mask == 0)
+		return (SWAPBLK_NONE);

	/*
	 * If the first try is for a block that includes the cursor, pre-undo
	 * the digit * radix offset in the first call; otherwise, ignore the
	 * cursor entirely.
	 */
	if (((mask >> digit) & 1) == 1)
		cursor -= digit * radix;
	else
		cursor = blk;

	/*
	 * Examine the nonempty subtree associated with each bit set in mask.
	 */
	do {
		bit = mask & -mask;
		digit = bitpos(bit);
		i = 1 + digit * skip;
		if (count <= scan[i].bm_bighint) {
			/*
			 * The allocation might fit beginning in the i'th
			 * subtree.
			 */
			r = blst_meta_alloc(&scan[i], cursor + digit * radix,
			    count, radix);
			if (r != SWAPBLK_NONE) {
				if (scan[i].bm_bitmap == 0)
					scan->bm_bitmap ^= bit;
				return (r);
			}
		}
		cursor = blk;
	} while ((mask ^= bit) != 0);

	/*
	 * We couldn't allocate count in this subtree.  If the whole tree was
	 * scanned, and the last tree node is allocated, update bighint.
	 */
	if (scan_from_start && !(digit == BLIST_META_RADIX - 1 &&
	    scan[i].bm_bighint == BLIST_MAX_ALLOC))
		scan->bm_bighint = count - 1;

	return (SWAPBLK_NONE);
}

/*
 * BLST_LEAF_FREE() - free allocated block from leaf bitmap
 */
static void
blst_leaf_free(blmeta_t *scan, daddr_t blk, int count)
{
	u_daddr_t mask;

	/*
	 * free some data in this bitmap
	 * mask=0000111111111110000
	 *          \_________/\__/
	 *		count   n
	 */
	mask = bitrange(blk & BLIST_BMAP_MASK, count);
	if (scan->bm_bitmap & mask)
		panic("freeing free block");
	scan->bm_bitmap |= mask;
}

/*
 * BLST_META_FREE() - free allocated blocks from radix tree meta info
 *
 * This support routine frees a range of blocks from the bitmap.
 * The range must be entirely enclosed by this radix node.  If a
 * meta node, we break the range down recursively to free blocks
 * in subnodes (which means that this code can free an arbitrary
 * range whereas the allocation code cannot allocate an arbitrary
 * range).
 */
static void
blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, u_daddr_t radix)
{
	daddr_t blk, endBlk, i, skip;
	int digit, endDigit;

	/*
	 * We could probably do a better job here.  We are required to make
	 * bighint at least as large as the biggest allocable block of data.
	 * If we just shoehorn it, a little extra overhead will be incurred
	 * on the next allocation (but only that one typically).
*/ scan->bm_bighint = BLIST_MAX_ALLOC; if (radix == BLIST_BMAP_RADIX) return (blst_leaf_free(scan, freeBlk, count)); endBlk = ummin(freeBlk + count, (freeBlk + radix) & -radix); radix /= BLIST_META_RADIX; skip = radix_to_skip(radix); blk = freeBlk & -radix; digit = (blk / radix) & BLIST_META_MASK; endDigit = 1 + (((endBlk - 1) / radix) & BLIST_META_MASK); scan->bm_bitmap |= bitrange(digit, endDigit - digit); for (i = 1 + digit * skip; blk < endBlk; i += skip) { blk += radix; count = ummin(blk, endBlk) - freeBlk; blst_meta_free(&scan[i], freeBlk, count, radix); freeBlk = blk; } } /* * BLST_COPY() - copy one radix tree to another * * Locates free space in the source tree and frees it in the destination * tree. The space may not already be free in the destination. */ static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, blist_t dest, daddr_t count) { daddr_t endBlk, i, skip; /* * Leaf node */ if (radix == BLIST_BMAP_RADIX) { u_daddr_t v = scan->bm_bitmap; if (v == (u_daddr_t)-1) { blist_free(dest, blk, count); } else if (v != 0) { int i; for (i = 0; i < count; ++i) { if (v & ((u_daddr_t)1 << i)) blist_free(dest, blk + i, 1); } } return; } /* * Meta node */ if (scan->bm_bitmap == 0) { /* * Source all allocated, leave dest allocated */ return; } endBlk = blk + count; radix /= BLIST_META_RADIX; skip = radix_to_skip(radix); for (i = 1; blk < endBlk; i += skip) { blk += radix; count = radix; if (blk >= endBlk) count -= blk - endBlk; blst_copy(&scan[i], blk - radix, radix, dest, count); } } /* * BLST_LEAF_FILL() - allocate specific blocks in leaf bitmap * * This routine allocates all blocks in the specified range * regardless of any existing allocations in that range. Returns * the number of blocks allocated by the call. */ static daddr_t blst_leaf_fill(blmeta_t *scan, daddr_t blk, int count) { daddr_t nblks; u_daddr_t mask; mask = bitrange(blk & BLIST_BMAP_MASK, count); /* Count the number of blocks that we are allocating. */ nblks = bitcount64(scan->bm_bitmap & mask); scan->bm_bitmap &= ~mask; return (nblks); } /* * BLIST_META_FILL() - allocate specific blocks at a meta node * * This routine allocates the specified range of blocks, * regardless of any existing allocations in the range. The * range must be within the extent of this node. Returns the * number of blocks allocated by the call. 
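 *
 * (Editor's note: "allocated by the call" counts only blocks that were
 * previously free; in the leaf case this is bitcount64() of the masked
 * bitmap before it is cleared.)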
*/ static daddr_t blst_meta_fill(blmeta_t *scan, daddr_t allocBlk, daddr_t count, u_daddr_t radix) { daddr_t blk, endBlk, i, nblks, skip; int digit; if (radix == BLIST_BMAP_RADIX) return (blst_leaf_fill(scan, allocBlk, count)); endBlk = ummin(allocBlk + count, (allocBlk + radix) & -radix); radix /= BLIST_META_RADIX; skip = radix_to_skip(radix); blk = allocBlk & -radix; nblks = 0; while (blk < endBlk) { digit = (blk / radix) & BLIST_META_MASK; i = 1 + digit * skip; blk += radix; count = ummin(blk, endBlk) - allocBlk; nblks += blst_meta_fill(&scan[i], allocBlk, count, radix); if (scan[i].bm_bitmap == 0) scan->bm_bitmap &= ~((u_daddr_t)1 << digit); allocBlk = blk; } return (nblks); } #ifdef BLIST_DEBUG static void blst_radix_print(blmeta_t *scan, daddr_t blk, daddr_t radix, int tab) { daddr_t skip; u_daddr_t bit, mask; int digit; if (radix == BLIST_BMAP_RADIX) { printf( "%*.*s(%08llx,%lld): bitmap %0*llx big=%lld\n", tab, tab, "", (long long)blk, (long long)radix, 1 + (BLIST_BMAP_RADIX - 1) / 4, (long long)scan->bm_bitmap, (long long)scan->bm_bighint ); return; } printf( "%*.*s(%08llx): subtree (%lld/%lld) bitmap %0*llx big=%lld {\n", tab, tab, "", (long long)blk, (long long)radix, (long long)radix, 1 + (BLIST_META_RADIX - 1) / 4, (long long)scan->bm_bitmap, (long long)scan->bm_bighint ); radix /= BLIST_META_RADIX; skip = radix_to_skip(radix); tab += 4; mask = scan->bm_bitmap; /* Examine the nonempty subtree associated with each bit set in mask */ do { bit = mask & -mask; digit = bitpos(bit); blst_radix_print(&scan[1 + digit * skip], blk + digit * radix, radix, tab); } while ((mask ^= bit) != 0); tab -= 4; printf( "%*.*s}\n", tab, tab, "" ); } #endif #ifdef BLIST_DEBUG int main(int ac, char **av) { int size = BLIST_META_RADIX * BLIST_BMAP_RADIX; int i; blist_t bl; struct sbuf *s; for (i = 1; i < ac; ++i) { const char *ptr = av[i]; if (*ptr != '-') { size = strtol(ptr, NULL, 0); continue; } ptr += 2; fprintf(stderr, "Bad option: %s\n", ptr - 2); exit(1); } bl = blist_create(size, M_WAITOK); blist_free(bl, 0, size); for (;;) { char buf[1024]; long long da = 0; long long count = 0; printf("%lld/%lld/%lld> ", (long long)blist_avail(bl), (long long)size, (long long)bl->bl_radix); fflush(stdout); if (fgets(buf, sizeof(buf), stdin) == NULL) break; switch(buf[0]) { case 'r': if (sscanf(buf + 1, "%lld", &count) == 1) { blist_resize(&bl, count, 1, M_WAITOK); } else { printf("?\n"); } case 'p': blist_print(bl); break; case 's': s = sbuf_new_auto(); blist_stats(bl, s); sbuf_finish(s); printf("%s", sbuf_data(s)); sbuf_delete(s); break; case 'a': if (sscanf(buf + 1, "%lld", &count) == 1) { daddr_t blk = blist_alloc(bl, count); printf(" R=%08llx\n", (long long)blk); } else { printf("?\n"); } break; case 'f': if (sscanf(buf + 1, "%llx %lld", &da, &count) == 2) { blist_free(bl, da, count); } else { printf("?\n"); } break; case 'l': if (sscanf(buf + 1, "%llx %lld", &da, &count) == 2) { printf(" n=%jd\n", (intmax_t)blist_fill(bl, da, count)); } else { printf("?\n"); } break; case '?': case 'h': puts( "p -print\n" "s -stats\n" "a %d -allocate\n" "f %x %d -free\n" "l %x %d -fill\n" "r %d -resize\n" "h/? -help" ); break; default: printf("?\n"); break; } } return(0); } void panic(const char *ctl, ...) 
{ va_list va; va_start(va, ctl); vfprintf(stderr, ctl, va); fprintf(stderr, "\n"); va_end(va); exit(1); } #endif Index: projects/clang700-import/sys/kern/vfs_aio.c =================================================================== --- projects/clang700-import/sys/kern/vfs_aio.c (revision 340917) +++ projects/clang700-import/sys/kern/vfs_aio.c (revision 340918) @@ -1,2985 +1,2988 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997 John S. Dyson. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. John S. Dyson's name may not be used to endorse or promote products * derived from this software without specific prior written permission. * * DISCLAIMER: This code isn't warranted to do anything useful. Anything * bad that happens because of using this software isn't the responsibility * of the author. This software is distributed AS-IS. */ /* * This file contains support for the POSIX 1003.1B AIO/LIO facility. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Counter for allocating reference ids to new jobs. Wrapped to 1 on * overflow. (XXX will be removed soon.) */ static u_long jobrefid; /* * Counter for aio_fsync. */ static uint64_t jobseqno; #ifndef MAX_AIO_PER_PROC #define MAX_AIO_PER_PROC 32 #endif #ifndef MAX_AIO_QUEUE_PER_PROC #define MAX_AIO_QUEUE_PER_PROC 256 #endif #ifndef MAX_AIO_QUEUE #define MAX_AIO_QUEUE 1024 /* Bigger than MAX_AIO_QUEUE_PER_PROC */ #endif #ifndef MAX_BUF_AIO #define MAX_BUF_AIO 16 #endif FEATURE(aio, "Asynchronous I/O"); SYSCTL_DECL(_p1003_1b); static MALLOC_DEFINE(M_LIO, "lio", "listio aio control block list"); static MALLOC_DEFINE(M_AIOS, "aios", "aio_suspend aio control block list"); static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management"); static int enable_aio_unsafe = 0; SYSCTL_INT(_vfs_aio, OID_AUTO, enable_unsafe, CTLFLAG_RW, &enable_aio_unsafe, 0, "Permit asynchronous IO on all file types, not just known-safe types"); static unsigned int unsafe_warningcnt = 1; SYSCTL_UINT(_vfs_aio, OID_AUTO, unsafe_warningcnt, CTLFLAG_RW, &unsafe_warningcnt, 0, "Warnings that will be triggered upon failed IO requests on unsafe files"); static int max_aio_procs = MAX_AIO_PROCS; SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs, CTLFLAG_RW, &max_aio_procs, 0, "Maximum number of kernel processes to use for handling async IO "); static int num_aio_procs = 0; SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs, CTLFLAG_RD, &num_aio_procs, 0, "Number of presently active kernel processes for async IO"); /* * The code will adjust the actual number of AIO processes towards this * number when it gets a chance. 
*/ static int target_aio_procs = TARGET_AIO_PROCS; SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs, 0, "Preferred number of ready kernel processes for async IO"); static int max_queue_count = MAX_AIO_QUEUE; SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0, "Maximum number of aio requests to queue, globally"); static int num_queue_count = 0; SYSCTL_INT(_vfs_aio, OID_AUTO, num_queue_count, CTLFLAG_RD, &num_queue_count, 0, "Number of queued aio requests"); static int num_buf_aio = 0; SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0, "Number of aio requests presently handled by the buf subsystem"); static int num_unmapped_aio = 0; SYSCTL_INT(_vfs_aio, OID_AUTO, num_unmapped_aio, CTLFLAG_RD, &num_unmapped_aio, 0, "Number of aio requests presently handled by unmapped I/O buffers"); /* Number of async I/O processes in the process of being started */ /* XXX This should be local to aio_aqueue() */ static int num_aio_resv_start = 0; static int aiod_lifetime; SYSCTL_INT(_vfs_aio, OID_AUTO, aiod_lifetime, CTLFLAG_RW, &aiod_lifetime, 0, "Maximum lifetime for idle aiod"); static int max_aio_per_proc = MAX_AIO_PER_PROC; SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_per_proc, CTLFLAG_RW, &max_aio_per_proc, 0, "Maximum active aio requests per process"); static int max_aio_queue_per_proc = MAX_AIO_QUEUE_PER_PROC; SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue_per_proc, CTLFLAG_RW, &max_aio_queue_per_proc, 0, "Maximum queued aio requests per process"); static int max_buf_aio = MAX_BUF_AIO; SYSCTL_INT(_vfs_aio, OID_AUTO, max_buf_aio, CTLFLAG_RW, &max_buf_aio, 0, "Maximum buf aio requests per process"); /* * Though redundant with vfs.aio.max_aio_queue_per_proc, POSIX requires * sysconf(3) to support AIO_LISTIO_MAX, and we implement that with * vfs.aio.aio_listio_max. */ SYSCTL_INT(_p1003_1b, CTL_P1003_1B_AIO_LISTIO_MAX, aio_listio_max, CTLFLAG_RD | CTLFLAG_CAPRD, &max_aio_queue_per_proc, 0, "Maximum aio requests for a single lio_listio call"); #ifdef COMPAT_FREEBSD6 typedef struct oaiocb { int aio_fildes; /* File descriptor */ off_t aio_offset; /* File offset for I/O */ volatile void *aio_buf; /* I/O buffer in process space */ size_t aio_nbytes; /* Number of bytes for I/O */ struct osigevent aio_sigevent; /* Signal to deliver */ int aio_lio_opcode; /* LIO opcode */ int aio_reqprio; /* Request priority -- ignored */ struct __aiocb_private _aiocb_private; } oaiocb_t; #endif /* * Below is a key of locks used to protect each member of struct kaiocb * aioliojob and kaioinfo and any backends. * * * - need not protected * a - locked by kaioinfo lock * b - locked by backend lock, the backend lock can be null in some cases, * for example, BIO belongs to this type, in this case, proc lock is * reused. * c - locked by aio_job_mtx, the lock for the generic file I/O backend. */ /* * If the routine that services an AIO request blocks while running in an * AIO kernel process it can starve other I/O requests. BIO requests * queued via aio_qphysio() complete in GEOM and do not use AIO kernel * processes at all. Socket I/O requests use a separate pool of * kprocs and also force non-blocking I/O. Other file I/O requests * use the generic fo_read/fo_write operations which can block. The * fsync and mlock operations can also block while executing. Ideally * none of these requests would block while executing. 
 *
 * Note that the service routines cannot toggle O_NONBLOCK in the file
 * structure directly while handling a request due to races with
 * userland threads.
 */

/* jobflags */
#define	KAIOCB_QUEUEING		0x01
#define	KAIOCB_CANCELLED	0x02
#define	KAIOCB_CANCELLING	0x04
#define	KAIOCB_CHECKSYNC	0x08
#define	KAIOCB_CLEARED		0x10
#define	KAIOCB_FINISHED		0x20

/*
 * AIO process info
 */
#define	AIOP_FREE	0x1			/* proc on free queue */

struct aioproc {
	int	aioprocflags;			/* (c) AIO proc flags */
	TAILQ_ENTRY(aioproc) list;		/* (c) list of processes */
	struct	proc *aioproc;			/* (*) the AIO proc */
};

/*
 * data structure for lio signal management
 */
struct aioliojob {
	int	lioj_flags;			/* (a) listio flags */
	int	lioj_count;			/* (a) count of outstanding jobs */
	int	lioj_finished_count;		/* (a) count of finished jobs */
	struct	sigevent lioj_signal;		/* (a) signal on all I/O done */
	TAILQ_ENTRY(aioliojob) lioj_list;	/* (a) lio list */
	struct	knlist klist;			/* (a) list of knotes */
	ksiginfo_t	lioj_ksi;		/* (a) Realtime signal info */
};

#define	LIOJ_SIGNAL		0x1	/* signal on all done (lio) */
#define	LIOJ_SIGNAL_POSTED	0x2	/* signal has been posted */
#define	LIOJ_KEVENT_POSTED	0x4	/* kevent triggered */

/*
 * per process aio data structure
 */
struct kaioinfo {
	struct	mtx kaio_mtx;		/* the lock to protect this struct */
	int	kaio_flags;		/* (a) per process kaio flags */
	int	kaio_active_count;	/* (c) number of currently used AIOs */
	int	kaio_count;		/* (a) size of AIO queue */
	int	kaio_buffer_count;	/* (a) number of physio buffers */
	TAILQ_HEAD(,kaiocb) kaio_all;	/* (a) all AIOs in a process */
	TAILQ_HEAD(,kaiocb) kaio_done;	/* (a) done queue for process */
	TAILQ_HEAD(,aioliojob) kaio_liojoblist; /* (a) list of lio jobs */
	TAILQ_HEAD(,kaiocb) kaio_jobqueue;	/* (a) job queue for process */
	TAILQ_HEAD(,kaiocb) kaio_syncqueue;	/* (a) queue for aio_fsync */
	TAILQ_HEAD(,kaiocb) kaio_syncready;	/* (a) second q for aio_fsync */
	struct	task kaio_task;		/* (*) task to kick aio processes */
	struct	task kaio_sync_task;	/* (*) task to schedule fsync jobs */
};

#define	AIO_LOCK(ki)		mtx_lock(&(ki)->kaio_mtx)
#define	AIO_UNLOCK(ki)		mtx_unlock(&(ki)->kaio_mtx)
#define	AIO_LOCK_ASSERT(ki, f)	mtx_assert(&(ki)->kaio_mtx, (f))
#define	AIO_MTX(ki)		(&(ki)->kaio_mtx)

#define	KAIO_RUNDOWN	0x1	/* process is being run down */
#define	KAIO_WAKEUP	0x2	/* wakeup process when AIO completes */

/*
 * Operations used to interact with userland aio control blocks.
 * Different ABIs provide their own operations.
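 * For example, the COMPAT_FREEBSD6 table (aiocb_ops_osigevent, defined
 * later in this file) overrides only the copyin method, translating the
 * old osigevent layout into the current struct aiocb.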
*/ struct aiocb_ops { int (*copyin)(struct aiocb *ujob, struct aiocb *kjob); long (*fetch_status)(struct aiocb *ujob); long (*fetch_error)(struct aiocb *ujob); int (*store_status)(struct aiocb *ujob, long status); int (*store_error)(struct aiocb *ujob, long error); int (*store_kernelinfo)(struct aiocb *ujob, long jobref); int (*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob); }; static TAILQ_HEAD(,aioproc) aio_freeproc; /* (c) Idle daemons */ static struct sema aio_newproc_sem; static struct mtx aio_job_mtx; static TAILQ_HEAD(,kaiocb) aio_jobs; /* (c) Async job list */ static struct unrhdr *aiod_unr; void aio_init_aioinfo(struct proc *p); static int aio_onceonly(void); static int aio_free_entry(struct kaiocb *job); static void aio_process_rw(struct kaiocb *job); static void aio_process_sync(struct kaiocb *job); static void aio_process_mlock(struct kaiocb *job); static void aio_schedule_fsync(void *context, int pending); static int aio_newproc(int *); int aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lio, int type, struct aiocb_ops *ops); static int aio_queue_file(struct file *fp, struct kaiocb *job); static void aio_physwakeup(struct bio *bp); static void aio_proc_rundown(void *arg, struct proc *p); static void aio_proc_rundown_exec(void *arg, struct proc *p, struct image_params *imgp); static int aio_qphysio(struct proc *p, struct kaiocb *job); static void aio_daemon(void *param); static void aio_bio_done_notify(struct proc *userp, struct kaiocb *job); static bool aio_clear_cancel_function_locked(struct kaiocb *job); static int aio_kick(struct proc *userp); static void aio_kick_nowait(struct proc *userp); static void aio_kick_helper(void *context, int pending); static int filt_aioattach(struct knote *kn); static void filt_aiodetach(struct knote *kn); static int filt_aio(struct knote *kn, long hint); static int filt_lioattach(struct knote *kn); static void filt_liodetach(struct knote *kn); static int filt_lio(struct knote *kn, long hint); /* * Zones for: * kaio Per process async io info * aiop async io process data * aiocb async io jobs * aiolio list io jobs */ static uma_zone_t kaio_zone, aiop_zone, aiocb_zone, aiolio_zone; /* kqueue filters for aio */ static struct filterops aio_filtops = { .f_isfd = 0, .f_attach = filt_aioattach, .f_detach = filt_aiodetach, .f_event = filt_aio, }; static struct filterops lio_filtops = { .f_isfd = 0, .f_attach = filt_lioattach, .f_detach = filt_liodetach, .f_event = filt_lio }; static eventhandler_tag exit_tag, exec_tag; TASKQUEUE_DEFINE_THREAD(aiod_kick); /* * Main operations function for use as a kernel module. 
*/ static int aio_modload(struct module *module, int cmd, void *arg) { int error = 0; switch (cmd) { case MOD_LOAD: aio_onceonly(); break; case MOD_SHUTDOWN: break; default: error = EOPNOTSUPP; break; } return (error); } static moduledata_t aio_mod = { "aio", &aio_modload, NULL }; DECLARE_MODULE(aio, aio_mod, SI_SUB_VFS, SI_ORDER_ANY); MODULE_VERSION(aio, 1); /* * Startup initialization */ static int aio_onceonly(void) { exit_tag = EVENTHANDLER_REGISTER(process_exit, aio_proc_rundown, NULL, EVENTHANDLER_PRI_ANY); exec_tag = EVENTHANDLER_REGISTER(process_exec, aio_proc_rundown_exec, NULL, EVENTHANDLER_PRI_ANY); kqueue_add_filteropts(EVFILT_AIO, &aio_filtops); kqueue_add_filteropts(EVFILT_LIO, &lio_filtops); TAILQ_INIT(&aio_freeproc); sema_init(&aio_newproc_sem, 0, "aio_new_proc"); mtx_init(&aio_job_mtx, "aio_job", NULL, MTX_DEF); TAILQ_INIT(&aio_jobs); aiod_unr = new_unrhdr(1, INT_MAX, NULL); kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); aiocb_zone = uma_zcreate("AIOCB", sizeof(struct kaiocb), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); aiolio_zone = uma_zcreate("AIOLIO", sizeof(struct aioliojob), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); aiod_lifetime = AIOD_LIFETIME_DEFAULT; jobrefid = 1; p31b_setcfg(CTL_P1003_1B_ASYNCHRONOUS_IO, _POSIX_ASYNCHRONOUS_IO); p31b_setcfg(CTL_P1003_1B_AIO_MAX, MAX_AIO_QUEUE); p31b_setcfg(CTL_P1003_1B_AIO_PRIO_DELTA_MAX, 0); return (0); } /* * Init the per-process aioinfo structure. The aioinfo limits are set * per-process for user limit (resource) management. */ void aio_init_aioinfo(struct proc *p) { struct kaioinfo *ki; ki = uma_zalloc(kaio_zone, M_WAITOK); mtx_init(&ki->kaio_mtx, "aiomtx", NULL, MTX_DEF | MTX_NEW); ki->kaio_flags = 0; ki->kaio_active_count = 0; ki->kaio_count = 0; ki->kaio_buffer_count = 0; TAILQ_INIT(&ki->kaio_all); TAILQ_INIT(&ki->kaio_done); TAILQ_INIT(&ki->kaio_jobqueue); TAILQ_INIT(&ki->kaio_liojoblist); TAILQ_INIT(&ki->kaio_syncqueue); TAILQ_INIT(&ki->kaio_syncready); TASK_INIT(&ki->kaio_task, 0, aio_kick_helper, p); TASK_INIT(&ki->kaio_sync_task, 0, aio_schedule_fsync, ki); PROC_LOCK(p); if (p->p_aioinfo == NULL) { p->p_aioinfo = ki; PROC_UNLOCK(p); } else { PROC_UNLOCK(p); mtx_destroy(&ki->kaio_mtx); uma_zfree(kaio_zone, ki); } while (num_aio_procs < MIN(target_aio_procs, max_aio_procs)) aio_newproc(NULL); } static int aio_sendsig(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi) { struct thread *td; int error; error = sigev_findtd(p, sigev, &td); if (error) return (error); if (!KSI_ONQ(ksi)) { ksiginfo_set_sigev(ksi, sigev); ksi->ksi_code = SI_ASYNCIO; ksi->ksi_flags |= KSI_EXT | KSI_INS; tdsendsignal(p, td, ksi->ksi_signo, ksi); } PROC_UNLOCK(p); return (error); } /* * Free a job entry. Wait for completion if it is currently active, but don't * delay forever. If we delay, we return a flag that says that we have to * restart the queue scan. 
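 * (Editor's note: in this revision the function always returns 0; the
 * practical contract is that it drops and reacquires the AIO lock, so
 * callers iterating a queue must restart their scan.)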
*/ static int aio_free_entry(struct kaiocb *job) { struct kaioinfo *ki; struct aioliojob *lj; struct proc *p; p = job->userproc; MPASS(curproc == p); ki = p->p_aioinfo; MPASS(ki != NULL); AIO_LOCK_ASSERT(ki, MA_OWNED); MPASS(job->jobflags & KAIOCB_FINISHED); atomic_subtract_int(&num_queue_count, 1); ki->kaio_count--; MPASS(ki->kaio_count >= 0); TAILQ_REMOVE(&ki->kaio_done, job, plist); TAILQ_REMOVE(&ki->kaio_all, job, allist); lj = job->lio; if (lj) { lj->lioj_count--; lj->lioj_finished_count--; if (lj->lioj_count == 0) { TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); /* lio is going away, we need to destroy any knotes */ knlist_delete(&lj->klist, curthread, 1); PROC_LOCK(p); sigqueue_take(&lj->lioj_ksi); PROC_UNLOCK(p); uma_zfree(aiolio_zone, lj); } } /* job is going away, we need to destroy any knotes */ knlist_delete(&job->klist, curthread, 1); PROC_LOCK(p); sigqueue_take(&job->ksi); PROC_UNLOCK(p); AIO_UNLOCK(ki); /* * The thread argument here is used to find the owning process * and is also passed to fo_close() which may pass it to various * places such as devsw close() routines. Because of that, we * need a thread pointer from the process owning the job that is * persistent and won't disappear out from under us or move to * another process. * * Currently, all the callers of this function call it to remove * a kaiocb from the current process' job list either via a * syscall or due to the current process calling exit() or * execve(). Thus, we know that p == curproc. We also know that * curthread can't exit since we are curthread. * * Therefore, we use curthread as the thread to pass to * knlist_delete(). This does mean that it is possible for the * thread pointer at close time to differ from the thread pointer * at open time, but this is already true of file descriptors in * a multithreaded process. */ if (job->fd_file) fdrop(job->fd_file, curthread); crfree(job->cred); uma_zfree(aiocb_zone, job); AIO_LOCK(ki); return (0); } static void aio_proc_rundown_exec(void *arg, struct proc *p, struct image_params *imgp __unused) { aio_proc_rundown(arg, p); } static int aio_cancel_job(struct proc *p, struct kaioinfo *ki, struct kaiocb *job) { aio_cancel_fn_t *func; int cancelled; AIO_LOCK_ASSERT(ki, MA_OWNED); if (job->jobflags & (KAIOCB_CANCELLED | KAIOCB_FINISHED)) return (0); MPASS((job->jobflags & KAIOCB_CANCELLING) == 0); job->jobflags |= KAIOCB_CANCELLED; func = job->cancel_fn; /* * If there is no cancel routine, just leave the job marked as * cancelled. The job should be in active use by a caller who * should complete it normally or when it fails to install a * cancel routine. */ if (func == NULL) return (0); /* * Set the CANCELLING flag so that aio_complete() will defer * completions of this job. This prevents the job from being * freed out from under the cancel callback. After the * callback any deferred completion (whether from the callback * or any other source) will be completed. */ job->jobflags |= KAIOCB_CANCELLING; AIO_UNLOCK(ki); func(job); AIO_LOCK(ki); job->jobflags &= ~KAIOCB_CANCELLING; if (job->jobflags & KAIOCB_FINISHED) { cancelled = job->uaiocb._aiocb_private.error == ECANCELED; TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist); aio_bio_done_notify(p, job); } else { /* * The cancel callback might have scheduled an * operation to cancel this request, but it is * only counted as cancelled if the request is * cancelled when the callback returns. */ cancelled = 0; } return (cancelled); } /* * Rundown the jobs for a given process. 
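 *
 * (Editor's note: cancellation is asynchronous, so the loop below
 * re-sleeps in msleep() and restarts until the job queue is empty and
 * the active count reaches zero; only then are completed jobs and lio
 * records freed.)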
*/ static void aio_proc_rundown(void *arg, struct proc *p) { struct kaioinfo *ki; struct aioliojob *lj; struct kaiocb *job, *jobn; KASSERT(curthread->td_proc == p, ("%s: called on non-curproc", __func__)); ki = p->p_aioinfo; if (ki == NULL) return; AIO_LOCK(ki); ki->kaio_flags |= KAIO_RUNDOWN; restart: /* * Try to cancel all pending requests. This code simulates * aio_cancel on all pending I/O requests. */ TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) { aio_cancel_job(p, ki, job); } /* Wait for all running I/O to be finished */ if (TAILQ_FIRST(&ki->kaio_jobqueue) || ki->kaio_active_count != 0) { ki->kaio_flags |= KAIO_WAKEUP; msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO, "aioprn", hz); goto restart; } /* Free all completed I/O requests. */ while ((job = TAILQ_FIRST(&ki->kaio_done)) != NULL) aio_free_entry(job); while ((lj = TAILQ_FIRST(&ki->kaio_liojoblist)) != NULL) { if (lj->lioj_count == 0) { TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); knlist_delete(&lj->klist, curthread, 1); PROC_LOCK(p); sigqueue_take(&lj->lioj_ksi); PROC_UNLOCK(p); uma_zfree(aiolio_zone, lj); } else { panic("LIO job not cleaned up: C:%d, FC:%d\n", lj->lioj_count, lj->lioj_finished_count); } } AIO_UNLOCK(ki); taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_task); taskqueue_drain(taskqueue_aiod_kick, &ki->kaio_sync_task); mtx_destroy(&ki->kaio_mtx); uma_zfree(kaio_zone, ki); p->p_aioinfo = NULL; } /* * Select a job to run (called by an AIO daemon). */ static struct kaiocb * aio_selectjob(struct aioproc *aiop) { struct kaiocb *job; struct kaioinfo *ki; struct proc *userp; mtx_assert(&aio_job_mtx, MA_OWNED); restart: TAILQ_FOREACH(job, &aio_jobs, list) { userp = job->userproc; ki = userp->p_aioinfo; if (ki->kaio_active_count < max_aio_per_proc) { TAILQ_REMOVE(&aio_jobs, job, list); if (!aio_clear_cancel_function(job)) goto restart; /* Account for currently active jobs. */ ki->kaio_active_count++; break; } } return (job); } /* * Move all data to a permanent storage device. This code * simulates the fsync syscall. */ static int aio_fsync_vnode(struct thread *td, struct vnode *vp) { struct mount *mp; int error; if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) goto drop; vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); if (vp->v_object != NULL) { VM_OBJECT_WLOCK(vp->v_object); vm_object_page_clean(vp->v_object, 0, 0, 0); VM_OBJECT_WUNLOCK(vp->v_object); } error = VOP_FSYNC(vp, MNT_WAIT, td); VOP_UNLOCK(vp, 0); vn_finished_write(mp); drop: return (error); } /* * The AIO processing activity for LIO_READ/LIO_WRITE. This is the code that * does the I/O request for the non-physio version of the operations. The * normal vn operations are used, and this code should work in all instances * for every type of file, including pipes, sockets, fifos, and regular files. * * XXX I don't think it works well for socket, pipe, and fifo. 
*/ static void aio_process_rw(struct kaiocb *job) { struct ucred *td_savedcred; struct thread *td; struct aiocb *cb; struct file *fp; struct uio auio; struct iovec aiov; ssize_t cnt; long msgsnd_st, msgsnd_end; long msgrcv_st, msgrcv_end; long oublock_st, oublock_end; long inblock_st, inblock_end; int error; KASSERT(job->uaiocb.aio_lio_opcode == LIO_READ || job->uaiocb.aio_lio_opcode == LIO_WRITE, ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode)); aio_switch_vmspace(job); td = curthread; td_savedcred = td->td_ucred; td->td_ucred = job->cred; cb = &job->uaiocb; fp = job->fd_file; aiov.iov_base = (void *)(uintptr_t)cb->aio_buf; aiov.iov_len = cb->aio_nbytes; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_offset = cb->aio_offset; auio.uio_resid = cb->aio_nbytes; cnt = cb->aio_nbytes; auio.uio_segflg = UIO_USERSPACE; auio.uio_td = td; msgrcv_st = td->td_ru.ru_msgrcv; msgsnd_st = td->td_ru.ru_msgsnd; inblock_st = td->td_ru.ru_inblock; oublock_st = td->td_ru.ru_oublock; /* * aio_aqueue() acquires a reference to the file that is * released in aio_free_entry(). */ if (cb->aio_lio_opcode == LIO_READ) { auio.uio_rw = UIO_READ; if (auio.uio_resid == 0) error = 0; else error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, td); } else { if (fp->f_type == DTYPE_VNODE) bwillwrite(); auio.uio_rw = UIO_WRITE; error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, td); } msgrcv_end = td->td_ru.ru_msgrcv; msgsnd_end = td->td_ru.ru_msgsnd; inblock_end = td->td_ru.ru_inblock; oublock_end = td->td_ru.ru_oublock; job->msgrcv = msgrcv_end - msgrcv_st; job->msgsnd = msgsnd_end - msgsnd_st; job->inblock = inblock_end - inblock_st; job->outblock = oublock_end - oublock_st; if ((error) && (auio.uio_resid != cnt)) { if (error == ERESTART || error == EINTR || error == EWOULDBLOCK) error = 0; if ((error == EPIPE) && (cb->aio_lio_opcode == LIO_WRITE)) { PROC_LOCK(job->userproc); kern_psignal(job->userproc, SIGPIPE); PROC_UNLOCK(job->userproc); } } cnt -= auio.uio_resid; td->td_ucred = td_savedcred; if (error) aio_complete(job, -1, error); else aio_complete(job, cnt, 0); } static void aio_process_sync(struct kaiocb *job) { struct thread *td = curthread; struct ucred *td_savedcred = td->td_ucred; struct file *fp = job->fd_file; int error = 0; KASSERT(job->uaiocb.aio_lio_opcode == LIO_SYNC, ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode)); td->td_ucred = job->cred; if (fp->f_vnode != NULL) error = aio_fsync_vnode(td, fp->f_vnode); td->td_ucred = td_savedcred; if (error) aio_complete(job, -1, error); else aio_complete(job, 0, 0); } static void aio_process_mlock(struct kaiocb *job) { struct aiocb *cb = &job->uaiocb; int error; KASSERT(job->uaiocb.aio_lio_opcode == LIO_MLOCK, ("%s: opcode %d", __func__, job->uaiocb.aio_lio_opcode)); aio_switch_vmspace(job); error = kern_mlock(job->userproc, job->cred, __DEVOLATILE(uintptr_t, cb->aio_buf), cb->aio_nbytes); aio_complete(job, error != 0 ? 
-1 : 0, error); } static void aio_bio_done_notify(struct proc *userp, struct kaiocb *job) { struct aioliojob *lj; struct kaioinfo *ki; struct kaiocb *sjob, *sjobn; int lj_done; bool schedule_fsync; ki = userp->p_aioinfo; AIO_LOCK_ASSERT(ki, MA_OWNED); lj = job->lio; lj_done = 0; if (lj) { lj->lioj_finished_count++; if (lj->lioj_count == lj->lioj_finished_count) lj_done = 1; } TAILQ_INSERT_TAIL(&ki->kaio_done, job, plist); MPASS(job->jobflags & KAIOCB_FINISHED); if (ki->kaio_flags & KAIO_RUNDOWN) goto notification_done; if (job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL || job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) aio_sendsig(userp, &job->uaiocb.aio_sigevent, &job->ksi); KNOTE_LOCKED(&job->klist, 1); if (lj_done) { if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) { lj->lioj_flags |= LIOJ_KEVENT_POSTED; KNOTE_LOCKED(&lj->klist, 1); } if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL || lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) { aio_sendsig(userp, &lj->lioj_signal, &lj->lioj_ksi); lj->lioj_flags |= LIOJ_SIGNAL_POSTED; } } notification_done: if (job->jobflags & KAIOCB_CHECKSYNC) { schedule_fsync = false; TAILQ_FOREACH_SAFE(sjob, &ki->kaio_syncqueue, list, sjobn) { if (job->fd_file != sjob->fd_file || job->seqno >= sjob->seqno) continue; if (--sjob->pending > 0) continue; TAILQ_REMOVE(&ki->kaio_syncqueue, sjob, list); if (!aio_clear_cancel_function_locked(sjob)) continue; TAILQ_INSERT_TAIL(&ki->kaio_syncready, sjob, list); schedule_fsync = true; } if (schedule_fsync) taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_sync_task); } if (ki->kaio_flags & KAIO_WAKEUP) { ki->kaio_flags &= ~KAIO_WAKEUP; wakeup(&userp->p_aioinfo); } } static void aio_schedule_fsync(void *context, int pending) { struct kaioinfo *ki; struct kaiocb *job; ki = context; AIO_LOCK(ki); while (!TAILQ_EMPTY(&ki->kaio_syncready)) { job = TAILQ_FIRST(&ki->kaio_syncready); TAILQ_REMOVE(&ki->kaio_syncready, job, list); AIO_UNLOCK(ki); aio_schedule(job, aio_process_sync); AIO_LOCK(ki); } AIO_UNLOCK(ki); } bool aio_cancel_cleared(struct kaiocb *job) { /* * The caller should hold the same queue lock held when * aio_clear_cancel_function() was called and set this flag * ensuring this check sees an up-to-date value. However, * there is no way to assert that. 
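 *
 * (Editor's note on the protocol: a backend registers a cancel
 * callback with aio_set_cancel_function() when it queues a job and
 * clears it with aio_clear_cancel_function() before completing.  If
 * clearing fails because a cancel is in flight, KAIOCB_CLEARED is
 * recorded, and the cancel callback uses this predicate, under its
 * own queue lock, to learn whether the job was already dequeued; see
 * aio_cancel_daemon_job() below.)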
*/ return ((job->jobflags & KAIOCB_CLEARED) != 0); } static bool aio_clear_cancel_function_locked(struct kaiocb *job) { AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED); MPASS(job->cancel_fn != NULL); if (job->jobflags & KAIOCB_CANCELLING) { job->jobflags |= KAIOCB_CLEARED; return (false); } job->cancel_fn = NULL; return (true); } bool aio_clear_cancel_function(struct kaiocb *job) { struct kaioinfo *ki; bool ret; ki = job->userproc->p_aioinfo; AIO_LOCK(ki); ret = aio_clear_cancel_function_locked(job); AIO_UNLOCK(ki); return (ret); } static bool aio_set_cancel_function_locked(struct kaiocb *job, aio_cancel_fn_t *func) { AIO_LOCK_ASSERT(job->userproc->p_aioinfo, MA_OWNED); if (job->jobflags & KAIOCB_CANCELLED) return (false); job->cancel_fn = func; return (true); } bool aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func) { struct kaioinfo *ki; bool ret; ki = job->userproc->p_aioinfo; AIO_LOCK(ki); ret = aio_set_cancel_function_locked(job, func); AIO_UNLOCK(ki); return (ret); } void aio_complete(struct kaiocb *job, long status, int error) { struct kaioinfo *ki; struct proc *userp; job->uaiocb._aiocb_private.error = error; job->uaiocb._aiocb_private.status = status; userp = job->userproc; ki = userp->p_aioinfo; AIO_LOCK(ki); KASSERT(!(job->jobflags & KAIOCB_FINISHED), ("duplicate aio_complete")); job->jobflags |= KAIOCB_FINISHED; if ((job->jobflags & (KAIOCB_QUEUEING | KAIOCB_CANCELLING)) == 0) { TAILQ_REMOVE(&ki->kaio_jobqueue, job, plist); aio_bio_done_notify(userp, job); } AIO_UNLOCK(ki); } void aio_cancel(struct kaiocb *job) { aio_complete(job, -1, ECANCELED); } void aio_switch_vmspace(struct kaiocb *job) { vmspace_switch_aio(job->userproc->p_vmspace); } /* * The AIO daemon, most of the actual work is done in aio_process_*, * but the setup (and address space mgmt) is done in this routine. */ static void aio_daemon(void *_id) { struct kaiocb *job; struct aioproc *aiop; struct kaioinfo *ki; struct proc *p; struct vmspace *myvm; struct thread *td = curthread; int id = (intptr_t)_id; /* * Grab an extra reference on the daemon's vmspace so that it * doesn't get freed by jobs that switch to a different * vmspace. */ p = td->td_proc; myvm = vmspace_acquire_ref(p); KASSERT(p->p_textvp == NULL, ("kthread has a textvp")); /* * Allocate and ready the aio control info. There is one aiop structure * per daemon. */ aiop = uma_zalloc(aiop_zone, M_WAITOK); aiop->aioproc = p; aiop->aioprocflags = 0; /* * Wakeup parent process. (Parent sleeps to keep from blasting away * and creating too many daemons.) */ sema_post(&aio_newproc_sem); mtx_lock(&aio_job_mtx); for (;;) { /* * Take daemon off of free queue */ if (aiop->aioprocflags & AIOP_FREE) { TAILQ_REMOVE(&aio_freeproc, aiop, list); aiop->aioprocflags &= ~AIOP_FREE; } /* * Check for jobs. */ while ((job = aio_selectjob(aiop)) != NULL) { mtx_unlock(&aio_job_mtx); ki = job->userproc->p_aioinfo; job->handle_fn(job); mtx_lock(&aio_job_mtx); /* Decrement the active job count. */ ki->kaio_active_count--; } /* * Disconnect from user address space. */ if (p->p_vmspace != myvm) { mtx_unlock(&aio_job_mtx); vmspace_switch_aio(myvm); mtx_lock(&aio_job_mtx); /* * We have to restart to avoid race, we only sleep if * no job can be selected. */ continue; } mtx_assert(&aio_job_mtx, MA_OWNED); TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list); aiop->aioprocflags |= AIOP_FREE; /* * If daemon is inactive for a long time, allow it to exit, * thereby freeing resources. 
*/ if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy", aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) && (aiop->aioprocflags & AIOP_FREE) && num_aio_procs > target_aio_procs) break; } TAILQ_REMOVE(&aio_freeproc, aiop, list); num_aio_procs--; mtx_unlock(&aio_job_mtx); uma_zfree(aiop_zone, aiop); free_unr(aiod_unr, id); vmspace_free(myvm); KASSERT(p->p_vmspace == myvm, ("AIOD: bad vmspace for exiting daemon")); KASSERT(myvm->vm_refcnt > 1, ("AIOD: bad vm refcnt for exiting daemon: %d", myvm->vm_refcnt)); kproc_exit(0); } /* * Create a new AIO daemon. This is mostly a kernel-thread fork routine. The * AIO daemon modifies its environment itself. */ static int aio_newproc(int *start) { int error; struct proc *p; int id; id = alloc_unr(aiod_unr); error = kproc_create(aio_daemon, (void *)(intptr_t)id, &p, RFNOWAIT, 0, "aiod%d", id); if (error == 0) { /* * Wait until daemon is started. */ sema_wait(&aio_newproc_sem); mtx_lock(&aio_job_mtx); num_aio_procs++; if (start != NULL) (*start)--; mtx_unlock(&aio_job_mtx); } else { free_unr(aiod_unr, id); } return (error); } /* * Try the high-performance, low-overhead physio method for eligible * VCHR devices. This method doesn't use an aio helper thread, and * thus has very low overhead. * * Assumes that the caller, aio_aqueue(), has incremented the file * structure's reference count, preventing its deallocation for the * duration of this call. */ static int aio_qphysio(struct proc *p, struct kaiocb *job) { struct aiocb *cb; struct file *fp; struct bio *bp; struct buf *pbuf; struct vnode *vp; struct cdevsw *csw; struct cdev *dev; struct kaioinfo *ki; int error, ref, poff; vm_prot_t prot; cb = &job->uaiocb; fp = job->fd_file; if (!(cb->aio_lio_opcode == LIO_WRITE || cb->aio_lio_opcode == LIO_READ)) return (-1); if (fp == NULL || fp->f_type != DTYPE_VNODE) return (-1); vp = fp->f_vnode; if (vp->v_type != VCHR) return (-1); if (vp->v_bufobj.bo_bsize == 0) return (-1); if (cb->aio_nbytes % vp->v_bufobj.bo_bsize) return (-1); ref = 0; csw = devvn_refthread(vp, &dev, &ref); if (csw == NULL) return (ENXIO); if ((csw->d_flags & D_DISK) == 0) { error = -1; goto unref; } if (cb->aio_nbytes > dev->si_iosize_max) { error = -1; goto unref; } ki = p->p_aioinfo; poff = (vm_offset_t)cb->aio_buf & PAGE_MASK; if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) { if (cb->aio_nbytes > MAXPHYS) { error = -1; goto unref; } pbuf = NULL; } else { if (cb->aio_nbytes > MAXPHYS - poff) { error = -1; goto unref; } if (ki->kaio_buffer_count >= max_buf_aio) { error = EAGAIN; goto unref; } job->pbuf = pbuf = (struct buf *)getpbuf(NULL); BUF_KERNPROC(pbuf); AIO_LOCK(ki); ki->kaio_buffer_count++; AIO_UNLOCK(ki); } job->bp = bp = g_alloc_bio(); bp->bio_length = cb->aio_nbytes; bp->bio_bcount = cb->aio_nbytes; bp->bio_done = aio_physwakeup; bp->bio_data = (void *)(uintptr_t)cb->aio_buf; bp->bio_offset = cb->aio_offset; bp->bio_cmd = cb->aio_lio_opcode == LIO_WRITE ? 
BIO_WRITE : BIO_READ; bp->bio_dev = dev; bp->bio_caller1 = (void *)job; prot = VM_PROT_READ; if (cb->aio_lio_opcode == LIO_READ) prot |= VM_PROT_WRITE; /* Less backwards than it looks */ job->npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, (vm_offset_t)bp->bio_data, bp->bio_length, prot, job->pages, nitems(job->pages)); if (job->npages < 0) { error = EFAULT; goto doerror; } if (pbuf != NULL) { pmap_qenter((vm_offset_t)pbuf->b_data, job->pages, job->npages); bp->bio_data = pbuf->b_data + poff; atomic_add_int(&num_buf_aio, 1); } else { bp->bio_ma = job->pages; bp->bio_ma_n = job->npages; bp->bio_ma_offset = poff; bp->bio_data = unmapped_buf; bp->bio_flags |= BIO_UNMAPPED; atomic_add_int(&num_unmapped_aio, 1); } /* Perform transfer. */ csw->d_strategy(bp); dev_relthread(dev, ref); return (0); doerror: if (pbuf != NULL) { AIO_LOCK(ki); ki->kaio_buffer_count--; AIO_UNLOCK(ki); relpbuf(pbuf, NULL); job->pbuf = NULL; } g_destroy_bio(bp); job->bp = NULL; unref: dev_relthread(dev, ref); return (error); } #ifdef COMPAT_FREEBSD6 static int convert_old_sigevent(struct osigevent *osig, struct sigevent *nsig) { /* * Only SIGEV_NONE, SIGEV_SIGNAL, and SIGEV_KEVENT are * supported by AIO with the old sigevent structure. */ nsig->sigev_notify = osig->sigev_notify; switch (nsig->sigev_notify) { case SIGEV_NONE: break; case SIGEV_SIGNAL: nsig->sigev_signo = osig->__sigev_u.__sigev_signo; break; case SIGEV_KEVENT: nsig->sigev_notify_kqueue = osig->__sigev_u.__sigev_notify_kqueue; nsig->sigev_value.sival_ptr = osig->sigev_value.sival_ptr; break; default: return (EINVAL); } return (0); } static int aiocb_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob) { struct oaiocb *ojob; int error; bzero(kjob, sizeof(struct aiocb)); error = copyin(ujob, kjob, sizeof(struct oaiocb)); if (error) return (error); ojob = (struct oaiocb *)kjob; return (convert_old_sigevent(&ojob->aio_sigevent, &kjob->aio_sigevent)); } #endif static int aiocb_copyin(struct aiocb *ujob, struct aiocb *kjob) { return (copyin(ujob, kjob, sizeof(struct aiocb))); } static long aiocb_fetch_status(struct aiocb *ujob) { return (fuword(&ujob->_aiocb_private.status)); } static long aiocb_fetch_error(struct aiocb *ujob) { return (fuword(&ujob->_aiocb_private.error)); } static int aiocb_store_status(struct aiocb *ujob, long status) { return (suword(&ujob->_aiocb_private.status, status)); } static int aiocb_store_error(struct aiocb *ujob, long error) { return (suword(&ujob->_aiocb_private.error, error)); } static int aiocb_store_kernelinfo(struct aiocb *ujob, long jobref) { return (suword(&ujob->_aiocb_private.kernelinfo, jobref)); } static int aiocb_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob) { return (suword(ujobp, (long)ujob)); } static struct aiocb_ops aiocb_ops = { .copyin = aiocb_copyin, .fetch_status = aiocb_fetch_status, .fetch_error = aiocb_fetch_error, .store_status = aiocb_store_status, .store_error = aiocb_store_error, .store_kernelinfo = aiocb_store_kernelinfo, .store_aiocb = aiocb_store_aiocb, }; #ifdef COMPAT_FREEBSD6 static struct aiocb_ops aiocb_ops_osigevent = { .copyin = aiocb_copyin_old_sigevent, .fetch_status = aiocb_fetch_status, .fetch_error = aiocb_fetch_error, .store_status = aiocb_store_status, .store_error = aiocb_store_error, .store_kernelinfo = aiocb_store_kernelinfo, .store_aiocb = aiocb_store_aiocb, }; #endif /* * Queue a new AIO request. Choosing either the threaded or direct physio VCHR * technique is done in this code. 
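 *
 * (Editor's note: aio_qphysio() signals "not eligible for physio" by
 * returning -1, in which case aio_queue_file() falls back to the
 * generic, kproc-serviced path; a return of 0 or a positive errno is a
 * final disposition.)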
 */
int
aio_aqueue(struct thread *td, struct aiocb *ujob, struct aioliojob *lj,
    int type, struct aiocb_ops *ops)
{
	struct proc *p = td->td_proc;
	struct file *fp;
	struct kaiocb *job;
	struct kaioinfo *ki;
	struct kevent kev;
	int opcode;
	int error;
	int fd, kqfd;
	int jid;
	u_short evflags;

	if (p->p_aioinfo == NULL)
		aio_init_aioinfo(p);

	ki = p->p_aioinfo;

	ops->store_status(ujob, -1);
	ops->store_error(ujob, 0);
	ops->store_kernelinfo(ujob, -1);

	if (num_queue_count >= max_queue_count ||
	    ki->kaio_count >= max_aio_queue_per_proc) {
		ops->store_error(ujob, EAGAIN);
		return (EAGAIN);
	}

	job = uma_zalloc(aiocb_zone, M_WAITOK | M_ZERO);
	knlist_init_mtx(&job->klist, AIO_MTX(ki));

	error = ops->copyin(ujob, &job->uaiocb);
	if (error) {
		ops->store_error(ujob, error);
		uma_zfree(aiocb_zone, job);
		return (error);
	}

	if (job->uaiocb.aio_nbytes > IOSIZE_MAX) {
		uma_zfree(aiocb_zone, job);
		return (EINVAL);
	}

	if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT &&
	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_SIGNAL &&
	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_THREAD_ID &&
	    job->uaiocb.aio_sigevent.sigev_notify != SIGEV_NONE) {
		ops->store_error(ujob, EINVAL);
		uma_zfree(aiocb_zone, job);
		return (EINVAL);
	}

	if ((job->uaiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL ||
	    job->uaiocb.aio_sigevent.sigev_notify == SIGEV_THREAD_ID) &&
	    !_SIG_VALID(job->uaiocb.aio_sigevent.sigev_signo)) {
		uma_zfree(aiocb_zone, job);
		return (EINVAL);
	}

	ksiginfo_init(&job->ksi);

	/* Save userspace address of the job info. */
	job->ujob = ujob;

	/* Get the opcode. */
	if (type != LIO_NOP)
		job->uaiocb.aio_lio_opcode = type;
	opcode = job->uaiocb.aio_lio_opcode;

	/*
	 * Validate the opcode and fetch the file object for the specified
	 * file descriptor.
	 *
	 * XXXRW: Moved the opcode validation up here so that we don't
	 * retrieve a file descriptor without knowing what the capability
	 * should be.
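	 *
	 * (Editor's note: each opcode is checked against the matching
	 * capability right: cap_pwrite_rights for LIO_WRITE,
	 * cap_pread_rights for LIO_READ, and cap_fsync_rights for
	 * LIO_SYNC.)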
*/ fd = job->uaiocb.aio_fildes; switch (opcode) { case LIO_WRITE: error = fget_write(td, fd, &cap_pwrite_rights, &fp); break; case LIO_READ: error = fget_read(td, fd, &cap_pread_rights, &fp); break; case LIO_SYNC: error = fget(td, fd, &cap_fsync_rights, &fp); break; case LIO_MLOCK: fp = NULL; break; case LIO_NOP: error = fget(td, fd, &cap_no_rights, &fp); break; default: error = EINVAL; } if (error) { uma_zfree(aiocb_zone, job); ops->store_error(ujob, error); return (error); } if (opcode == LIO_SYNC && fp->f_vnode == NULL) { error = EINVAL; goto aqueue_fail; } if ((opcode == LIO_READ || opcode == LIO_WRITE) && job->uaiocb.aio_offset < 0 && (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR)) { error = EINVAL; goto aqueue_fail; } job->fd_file = fp; mtx_lock(&aio_job_mtx); jid = jobrefid++; job->seqno = jobseqno++; mtx_unlock(&aio_job_mtx); error = ops->store_kernelinfo(ujob, jid); if (error) { error = EINVAL; goto aqueue_fail; } job->uaiocb._aiocb_private.kernelinfo = (void *)(intptr_t)jid; if (opcode == LIO_NOP) { fdrop(fp, td); uma_zfree(aiocb_zone, job); return (0); } if (job->uaiocb.aio_sigevent.sigev_notify != SIGEV_KEVENT) goto no_kqueue; evflags = job->uaiocb.aio_sigevent.sigev_notify_kevent_flags; if ((evflags & ~(EV_CLEAR | EV_DISPATCH | EV_ONESHOT)) != 0) { error = EINVAL; goto aqueue_fail; } kqfd = job->uaiocb.aio_sigevent.sigev_notify_kqueue; + memset(&kev, 0, sizeof(kev)); kev.ident = (uintptr_t)job->ujob; kev.filter = EVFILT_AIO; kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1 | evflags; kev.data = (intptr_t)job; kev.udata = job->uaiocb.aio_sigevent.sigev_value.sival_ptr; - error = kqfd_register(kqfd, &kev, td, 1); + error = kqfd_register(kqfd, &kev, td, M_WAITOK); if (error) goto aqueue_fail; no_kqueue: ops->store_error(ujob, EINPROGRESS); job->uaiocb._aiocb_private.error = EINPROGRESS; job->userproc = p; job->cred = crhold(td->td_ucred); job->jobflags = KAIOCB_QUEUEING; job->lio = lj; if (opcode == LIO_MLOCK) { aio_schedule(job, aio_process_mlock); error = 0; } else if (fp->f_ops->fo_aio_queue == NULL) error = aio_queue_file(fp, job); else error = fo_aio_queue(fp, job); if (error) goto aqueue_fail; AIO_LOCK(ki); job->jobflags &= ~KAIOCB_QUEUEING; TAILQ_INSERT_TAIL(&ki->kaio_all, job, allist); ki->kaio_count++; if (lj) lj->lioj_count++; atomic_add_int(&num_queue_count, 1); if (job->jobflags & KAIOCB_FINISHED) { /* * The queue callback completed the request synchronously. * The bulk of the completion is deferred in that case * until this point. 
*/ aio_bio_done_notify(p, job); } else TAILQ_INSERT_TAIL(&ki->kaio_jobqueue, job, plist); AIO_UNLOCK(ki); return (0); aqueue_fail: knlist_delete(&job->klist, curthread, 0); if (fp) fdrop(fp, td); uma_zfree(aiocb_zone, job); ops->store_error(ujob, error); return (error); } static void aio_cancel_daemon_job(struct kaiocb *job) { mtx_lock(&aio_job_mtx); if (!aio_cancel_cleared(job)) TAILQ_REMOVE(&aio_jobs, job, list); mtx_unlock(&aio_job_mtx); aio_cancel(job); } void aio_schedule(struct kaiocb *job, aio_handle_fn_t *func) { mtx_lock(&aio_job_mtx); if (!aio_set_cancel_function(job, aio_cancel_daemon_job)) { mtx_unlock(&aio_job_mtx); aio_cancel(job); return; } job->handle_fn = func; TAILQ_INSERT_TAIL(&aio_jobs, job, list); aio_kick_nowait(job->userproc); mtx_unlock(&aio_job_mtx); } static void aio_cancel_sync(struct kaiocb *job) { struct kaioinfo *ki; ki = job->userproc->p_aioinfo; AIO_LOCK(ki); if (!aio_cancel_cleared(job)) TAILQ_REMOVE(&ki->kaio_syncqueue, job, list); AIO_UNLOCK(ki); aio_cancel(job); } int aio_queue_file(struct file *fp, struct kaiocb *job) { struct kaioinfo *ki; struct kaiocb *job2; struct vnode *vp; struct mount *mp; int error; bool safe; ki = job->userproc->p_aioinfo; error = aio_qphysio(job->userproc, job); if (error >= 0) return (error); safe = false; if (fp->f_type == DTYPE_VNODE) { vp = fp->f_vnode; if (vp->v_type == VREG || vp->v_type == VDIR) { mp = fp->f_vnode->v_mount; if (mp == NULL || (mp->mnt_flag & MNT_LOCAL) != 0) safe = true; } } if (!(safe || enable_aio_unsafe)) { counted_warning(&unsafe_warningcnt, "is attempting to use unsafe AIO requests"); return (EOPNOTSUPP); } switch (job->uaiocb.aio_lio_opcode) { case LIO_READ: case LIO_WRITE: aio_schedule(job, aio_process_rw); error = 0; break; case LIO_SYNC: AIO_LOCK(ki); TAILQ_FOREACH(job2, &ki->kaio_jobqueue, plist) { if (job2->fd_file == job->fd_file && job2->uaiocb.aio_lio_opcode != LIO_SYNC && job2->seqno < job->seqno) { job2->jobflags |= KAIOCB_CHECKSYNC; job->pending++; } } if (job->pending != 0) { if (!aio_set_cancel_function_locked(job, aio_cancel_sync)) { AIO_UNLOCK(ki); aio_cancel(job); return (0); } TAILQ_INSERT_TAIL(&ki->kaio_syncqueue, job, list); AIO_UNLOCK(ki); return (0); } AIO_UNLOCK(ki); aio_schedule(job, aio_process_sync); error = 0; break; default: error = EINVAL; } return (error); } static void aio_kick_nowait(struct proc *userp) { struct kaioinfo *ki = userp->p_aioinfo; struct aioproc *aiop; mtx_assert(&aio_job_mtx, MA_OWNED); if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) { TAILQ_REMOVE(&aio_freeproc, aiop, list); aiop->aioprocflags &= ~AIOP_FREE; wakeup(aiop->aioproc); } else if (num_aio_resv_start + num_aio_procs < max_aio_procs && ki->kaio_active_count + num_aio_resv_start < max_aio_per_proc) { taskqueue_enqueue(taskqueue_aiod_kick, &ki->kaio_task); } } static int aio_kick(struct proc *userp) { struct kaioinfo *ki = userp->p_aioinfo; struct aioproc *aiop; int error, ret = 0; mtx_assert(&aio_job_mtx, MA_OWNED); retryproc: if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) { TAILQ_REMOVE(&aio_freeproc, aiop, list); aiop->aioprocflags &= ~AIOP_FREE; wakeup(aiop->aioproc); } else if (num_aio_resv_start + num_aio_procs < max_aio_procs && ki->kaio_active_count + num_aio_resv_start < max_aio_per_proc) { num_aio_resv_start++; mtx_unlock(&aio_job_mtx); error = aio_newproc(&num_aio_resv_start); mtx_lock(&aio_job_mtx); if (error) { num_aio_resv_start--; goto retryproc; } } else { ret = -1; } return (ret); } static void aio_kick_helper(void *context, int pending) { struct proc *userp = context; 
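
	/*
	 * Editor's note: 'pending' is the number of times the task was
	 * enqueued since it last ran; attempt one daemon kick per queued
	 * request, stopping early if aio_kick() reports failure.
	 */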
	mtx_lock(&aio_job_mtx);
	while (--pending >= 0) {
		if (aio_kick(userp))
			break;
	}
	mtx_unlock(&aio_job_mtx);
}

/*
 * Support the aio_return system call; as a side effect, kernel
 * resources are released.
 */
static int
kern_aio_return(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops)
{
	struct proc *p = td->td_proc;
	struct kaiocb *job;
	struct kaioinfo *ki;
	long status, error;

	ki = p->p_aioinfo;
	if (ki == NULL)
		return (EINVAL);
	AIO_LOCK(ki);
	TAILQ_FOREACH(job, &ki->kaio_done, plist) {
		if (job->ujob == ujob)
			break;
	}
	if (job != NULL) {
		MPASS(job->jobflags & KAIOCB_FINISHED);
		status = job->uaiocb._aiocb_private.status;
		error = job->uaiocb._aiocb_private.error;
		td->td_retval[0] = status;
		td->td_ru.ru_oublock += job->outblock;
		td->td_ru.ru_inblock += job->inblock;
		td->td_ru.ru_msgsnd += job->msgsnd;
		td->td_ru.ru_msgrcv += job->msgrcv;
		aio_free_entry(job);
		AIO_UNLOCK(ki);
		ops->store_error(ujob, error);
		ops->store_status(ujob, status);
	} else {
		error = EINVAL;
		AIO_UNLOCK(ki);
	}
	return (error);
}

int
sys_aio_return(struct thread *td, struct aio_return_args *uap)
{

	return (kern_aio_return(td, uap->aiocbp, &aiocb_ops));
}

/*
 * Allow a process to wake up when any of the I/O requests are completed.
 */
static int
kern_aio_suspend(struct thread *td, int njoblist, struct aiocb **ujoblist,
    struct timespec *ts)
{
	struct proc *p = td->td_proc;
	struct timeval atv;
	struct kaioinfo *ki;
	struct kaiocb *firstjob, *job;
	int error, i, timo;

	timo = 0;
	if (ts) {
		if (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
			return (EINVAL);

		TIMESPEC_TO_TIMEVAL(&atv, ts);
		if (itimerfix(&atv))
			return (EINVAL);
		timo = tvtohz(&atv);
	}

	ki = p->p_aioinfo;
	if (ki == NULL)
		return (EAGAIN);

	if (njoblist == 0)
		return (0);

	AIO_LOCK(ki);
	for (;;) {
		firstjob = NULL;
		error = 0;
		TAILQ_FOREACH(job, &ki->kaio_all, allist) {
			for (i = 0; i < njoblist; i++) {
				if (job->ujob == ujoblist[i]) {
					if (firstjob == NULL)
						firstjob = job;
					if (job->jobflags & KAIOCB_FINISHED)
						goto RETURN;
				}
			}
		}
		/* All tasks were finished. */
		if (firstjob == NULL)
			break;

		ki->kaio_flags |= KAIO_WAKEUP;
		error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH,
		    "aiospn", timo);
		if (error == ERESTART)
			error = EINTR;
		if (error)
			break;
	}
RETURN:
	AIO_UNLOCK(ki);
	return (error);
}

int
sys_aio_suspend(struct thread *td, struct aio_suspend_args *uap)
{
	struct timespec ts, *tsp;
	struct aiocb **ujoblist;
	int error;

	if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc)
		return (EINVAL);

	if (uap->timeout) {
		/* Get timespec struct. */
		if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

	ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIOS, M_WAITOK);
	error = copyin(uap->aiocbp, ujoblist, uap->nent * sizeof(ujoblist[0]));
	if (error == 0)
		error = kern_aio_suspend(td, uap->nent, ujoblist, tsp);
	free(ujoblist, M_AIOS);
	return (error);
}

/*
 * aio_cancel cancels any non-physio aio operations not currently in
 * progress.
 */
int
sys_aio_cancel(struct thread *td, struct aio_cancel_args *uap)
{
	struct proc *p = td->td_proc;
	struct kaioinfo *ki;
	struct kaiocb *job, *jobn;
	struct file *fp;
	int error;
	int cancelled = 0;
	int notcancelled = 0;
	struct vnode *vp;

	/*
	 * Lookup file object.
*/ error = fget(td, uap->fd, &cap_no_rights, &fp); if (error) return (error); ki = p->p_aioinfo; if (ki == NULL) goto done; if (fp->f_type == DTYPE_VNODE) { vp = fp->f_vnode; if (vn_isdisk(vp, &error)) { fdrop(fp, td); td->td_retval[0] = AIO_NOTCANCELED; return (0); } } AIO_LOCK(ki); TAILQ_FOREACH_SAFE(job, &ki->kaio_jobqueue, plist, jobn) { if ((uap->fd == job->uaiocb.aio_fildes) && ((uap->aiocbp == NULL) || (uap->aiocbp == job->ujob))) { if (aio_cancel_job(p, ki, job)) { cancelled++; } else { notcancelled++; } if (uap->aiocbp != NULL) break; } } AIO_UNLOCK(ki); done: fdrop(fp, td); if (uap->aiocbp != NULL) { if (cancelled) { td->td_retval[0] = AIO_CANCELED; return (0); } } if (notcancelled) { td->td_retval[0] = AIO_NOTCANCELED; return (0); } if (cancelled) { td->td_retval[0] = AIO_CANCELED; return (0); } td->td_retval[0] = AIO_ALLDONE; return (0); } /* * aio_error is implemented at the kernel level for compatibility purposes * only. For a user mode async implementation, it would be best to do it in * a userland subroutine. */ static int kern_aio_error(struct thread *td, struct aiocb *ujob, struct aiocb_ops *ops) { struct proc *p = td->td_proc; struct kaiocb *job; struct kaioinfo *ki; int status; ki = p->p_aioinfo; if (ki == NULL) { td->td_retval[0] = EINVAL; return (0); } AIO_LOCK(ki); TAILQ_FOREACH(job, &ki->kaio_all, allist) { if (job->ujob == ujob) { if (job->jobflags & KAIOCB_FINISHED) td->td_retval[0] = job->uaiocb._aiocb_private.error; else td->td_retval[0] = EINPROGRESS; AIO_UNLOCK(ki); return (0); } } AIO_UNLOCK(ki); /* * Hack for failure of aio_aqueue. */ status = ops->fetch_status(ujob); if (status == -1) { td->td_retval[0] = ops->fetch_error(ujob); return (0); } td->td_retval[0] = EINVAL; return (0); } int sys_aio_error(struct thread *td, struct aio_error_args *uap) { return (kern_aio_error(td, uap->aiocbp, &aiocb_ops)); } /* syscall - asynchronous read from a file (REALTIME) */ #ifdef COMPAT_FREEBSD6 int freebsd6_aio_read(struct thread *td, struct freebsd6_aio_read_args *uap) { return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ, &aiocb_ops_osigevent)); } #endif int sys_aio_read(struct thread *td, struct aio_read_args *uap) { return (aio_aqueue(td, uap->aiocbp, NULL, LIO_READ, &aiocb_ops)); } /* syscall - asynchronous write to a file (REALTIME) */ #ifdef COMPAT_FREEBSD6 int freebsd6_aio_write(struct thread *td, struct freebsd6_aio_write_args *uap) { return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops_osigevent)); } #endif int sys_aio_write(struct thread *td, struct aio_write_args *uap) { return (aio_aqueue(td, uap->aiocbp, NULL, LIO_WRITE, &aiocb_ops)); } int sys_aio_mlock(struct thread *td, struct aio_mlock_args *uap) { return (aio_aqueue(td, uap->aiocbp, NULL, LIO_MLOCK, &aiocb_ops)); } static int kern_lio_listio(struct thread *td, int mode, struct aiocb * const *uacb_list, struct aiocb **acb_list, int nent, struct sigevent *sig, struct aiocb_ops *ops) { struct proc *p = td->td_proc; struct aiocb *job; struct kaioinfo *ki; struct aioliojob *lj; struct kevent kev; int error; int nagain, nerror; int i; if ((mode != LIO_NOWAIT) && (mode != LIO_WAIT)) return (EINVAL); if (nent < 0 || nent > max_aio_queue_per_proc) return (EINVAL); if (p->p_aioinfo == NULL) aio_init_aioinfo(p); ki = p->p_aioinfo; lj = uma_zalloc(aiolio_zone, M_WAITOK); lj->lioj_flags = 0; lj->lioj_count = 0; lj->lioj_finished_count = 0; knlist_init_mtx(&lj->klist, AIO_MTX(ki)); ksiginfo_init(&lj->lioj_ksi); /* * Setup signal.
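 * For LIO_NOWAIT the caller chooses how completion of the whole list is
 * reported: a kevent (SIGEV_KEVENT), a signal (SIGEV_SIGNAL or
 * SIGEV_THREAD_ID), or no notification at all (SIGEV_NONE); any other
 * sigev_notify value is rejected below.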
*/ if (sig && (mode == LIO_NOWAIT)) { bcopy(sig, &lj->lioj_signal, sizeof(lj->lioj_signal)); if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) { /* Assume only new style KEVENT */ + memset(&kev, 0, sizeof(kev)); kev.filter = EVFILT_LIO; kev.flags = EV_ADD | EV_ENABLE | EV_FLAG1; kev.ident = (uintptr_t)uacb_list; /* something unique */ kev.data = (intptr_t)lj; /* pass user defined sigval data */ kev.udata = lj->lioj_signal.sigev_value.sival_ptr; error = kqfd_register( - lj->lioj_signal.sigev_notify_kqueue, &kev, td, 1); + lj->lioj_signal.sigev_notify_kqueue, &kev, td, + M_WAITOK); if (error) { uma_zfree(aiolio_zone, lj); return (error); } } else if (lj->lioj_signal.sigev_notify == SIGEV_NONE) { ; } else if (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL || lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID) { if (!_SIG_VALID(lj->lioj_signal.sigev_signo)) { uma_zfree(aiolio_zone, lj); return EINVAL; } lj->lioj_flags |= LIOJ_SIGNAL; } else { uma_zfree(aiolio_zone, lj); return EINVAL; } } AIO_LOCK(ki); TAILQ_INSERT_TAIL(&ki->kaio_liojoblist, lj, lioj_list); /* * Add extra aiocb count to avoid the lio to be freed * by other threads doing aio_waitcomplete or aio_return, * and prevent event from being sent until we have queued * all tasks. */ lj->lioj_count = 1; AIO_UNLOCK(ki); /* * Get pointers to the list of I/O requests. */ nagain = 0; nerror = 0; for (i = 0; i < nent; i++) { job = acb_list[i]; if (job != NULL) { error = aio_aqueue(td, job, lj, LIO_NOP, ops); if (error == EAGAIN) nagain++; else if (error != 0) nerror++; } } error = 0; AIO_LOCK(ki); if (mode == LIO_WAIT) { while (lj->lioj_count - 1 != lj->lioj_finished_count) { ki->kaio_flags |= KAIO_WAKEUP; error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH, "aiospn", 0); if (error == ERESTART) error = EINTR; if (error) break; } } else { if (lj->lioj_count - 1 == lj->lioj_finished_count) { if (lj->lioj_signal.sigev_notify == SIGEV_KEVENT) { lj->lioj_flags |= LIOJ_KEVENT_POSTED; KNOTE_LOCKED(&lj->klist, 1); } if ((lj->lioj_flags & (LIOJ_SIGNAL|LIOJ_SIGNAL_POSTED)) == LIOJ_SIGNAL && (lj->lioj_signal.sigev_notify == SIGEV_SIGNAL || lj->lioj_signal.sigev_notify == SIGEV_THREAD_ID)) { aio_sendsig(p, &lj->lioj_signal, &lj->lioj_ksi); lj->lioj_flags |= LIOJ_SIGNAL_POSTED; } } } lj->lioj_count--; if (lj->lioj_count == 0) { TAILQ_REMOVE(&ki->kaio_liojoblist, lj, lioj_list); knlist_delete(&lj->klist, curthread, 1); PROC_LOCK(p); sigqueue_take(&lj->lioj_ksi); PROC_UNLOCK(p); AIO_UNLOCK(ki); uma_zfree(aiolio_zone, lj); } else AIO_UNLOCK(ki); if (nerror) return (EIO); else if (nagain) return (EAGAIN); else return (error); } /* syscall - list directed I/O (REALTIME) */ #ifdef COMPAT_FREEBSD6 int freebsd6_lio_listio(struct thread *td, struct freebsd6_lio_listio_args *uap) { struct aiocb **acb_list; struct sigevent *sigp, sig; struct osigevent osig; int error, nent; if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) return (EINVAL); nent = uap->nent; if (nent < 0 || nent > max_aio_queue_per_proc) return (EINVAL); if (uap->sig && (uap->mode == LIO_NOWAIT)) { error = copyin(uap->sig, &osig, sizeof(osig)); if (error) return (error); error = convert_old_sigevent(&osig, &sig); if (error) return (error); sigp = &sig; } else sigp = NULL; acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK); error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0])); if (error == 0) error = kern_lio_listio(td, uap->mode, (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp, &aiocb_ops_osigevent); free(acb_list, M_LIO); return (error); } #endif 
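/*
 * A userland sketch of the LIO_NOWAIT/SIGEV_KEVENT path handled above:
 * completion of the whole list is delivered as a single EVFILT_LIO
 * kevent on a caller-supplied kqueue, with ev.udata carrying
 * sigev_value.  This is illustrative only and not part of this change;
 * the path name and buffer size are placeholders and most error
 * handling is elided.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <aio.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int
 *	main(void)
 *	{
 *		static char buf[4096];
 *		struct aiocb iocb, *list[1];
 *		struct sigevent sev;
 *		struct kevent ev;
 *		int fd, kq;
 *
 *		if ((fd = open("/tmp/example.dat", O_RDONLY)) == -1)
 *			err(1, "open");
 *		if ((kq = kqueue()) == -1)
 *			err(1, "kqueue");
 *		memset(&iocb, 0, sizeof(iocb));
 *		iocb.aio_fildes = fd;
 *		iocb.aio_buf = buf;
 *		iocb.aio_nbytes = sizeof(buf);
 *		iocb.aio_lio_opcode = LIO_READ;
 *		list[0] = &iocb;
 *		memset(&sev, 0, sizeof(sev));
 *		sev.sigev_notify = SIGEV_KEVENT;
 *		sev.sigev_notify_kqueue = kq;
 *		sev.sigev_value.sival_ptr = list;
 *		if (lio_listio(LIO_NOWAIT, list, 1, &sev) == -1)
 *			err(1, "lio_listio");
 *		if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)
 *			printf("done, %zd bytes\n", aio_return(&iocb));
 *		return (0);
 *	}
 */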
/* syscall - list directed I/O (REALTIME) */ int sys_lio_listio(struct thread *td, struct lio_listio_args *uap) { struct aiocb **acb_list; struct sigevent *sigp, sig; int error, nent; if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) return (EINVAL); nent = uap->nent; if (nent < 0 || nent > max_aio_queue_per_proc) return (EINVAL); if (uap->sig && (uap->mode == LIO_NOWAIT)) { error = copyin(uap->sig, &sig, sizeof(sig)); if (error) return (error); sigp = &sig; } else sigp = NULL; acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK); error = copyin(uap->acb_list, acb_list, nent * sizeof(acb_list[0])); if (error == 0) error = kern_lio_listio(td, uap->mode, uap->acb_list, acb_list, nent, sigp, &aiocb_ops); free(acb_list, M_LIO); return (error); } static void aio_physwakeup(struct bio *bp) { struct kaiocb *job = (struct kaiocb *)bp->bio_caller1; struct proc *userp; struct kaioinfo *ki; size_t nbytes; int error, nblks; /* Release mapping into kernel space. */ userp = job->userproc; ki = userp->p_aioinfo; if (job->pbuf) { pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages); relpbuf(job->pbuf, NULL); job->pbuf = NULL; atomic_subtract_int(&num_buf_aio, 1); AIO_LOCK(ki); ki->kaio_buffer_count--; AIO_UNLOCK(ki); } else atomic_subtract_int(&num_unmapped_aio, 1); vm_page_unhold_pages(job->pages, job->npages); bp = job->bp; job->bp = NULL; nbytes = job->uaiocb.aio_nbytes - bp->bio_resid; error = 0; if (bp->bio_flags & BIO_ERROR) error = bp->bio_error; nblks = btodb(nbytes); if (job->uaiocb.aio_lio_opcode == LIO_WRITE) job->outblock += nblks; else job->inblock += nblks; if (error) aio_complete(job, -1, error); else aio_complete(job, nbytes, 0); g_destroy_bio(bp); } /* syscall - wait for the next completion of an aio request */ static int kern_aio_waitcomplete(struct thread *td, struct aiocb **ujobp, struct timespec *ts, struct aiocb_ops *ops) { struct proc *p = td->td_proc; struct timeval atv; struct kaioinfo *ki; struct kaiocb *job; struct aiocb *ujob; long error, status; int timo; ops->store_aiocb(ujobp, NULL); if (ts == NULL) { timo = 0; } else if (ts->tv_sec == 0 && ts->tv_nsec == 0) { timo = -1; } else { if ((ts->tv_nsec < 0) || (ts->tv_nsec >= 1000000000)) return (EINVAL); TIMESPEC_TO_TIMEVAL(&atv, ts); if (itimerfix(&atv)) return (EINVAL); timo = tvtohz(&atv); } if (p->p_aioinfo == NULL) aio_init_aioinfo(p); ki = p->p_aioinfo; error = 0; job = NULL; AIO_LOCK(ki); while ((job = TAILQ_FIRST(&ki->kaio_done)) == NULL) { if (timo == -1) { error = EWOULDBLOCK; break; } ki->kaio_flags |= KAIO_WAKEUP; error = msleep(&p->p_aioinfo, AIO_MTX(ki), PRIBIO | PCATCH, "aiowc", timo); if (timo && error == ERESTART) error = EINTR; if (error) break; } if (job != NULL) { MPASS(job->jobflags & KAIOCB_FINISHED); ujob = job->ujob; status = job->uaiocb._aiocb_private.status; error = job->uaiocb._aiocb_private.error; td->td_retval[0] = status; td->td_ru.ru_oublock += job->outblock; td->td_ru.ru_inblock += job->inblock; td->td_ru.ru_msgsnd += job->msgsnd; td->td_ru.ru_msgrcv += job->msgrcv; aio_free_entry(job); AIO_UNLOCK(ki); ops->store_aiocb(ujobp, ujob); ops->store_error(ujob, error); ops->store_status(ujob, status); } else AIO_UNLOCK(ki); return (error); } int sys_aio_waitcomplete(struct thread *td, struct aio_waitcomplete_args *uap) { struct timespec ts, *tsp; int error; if (uap->timeout) { /* Get timespec struct. 
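 * A NULL timeout makes aio_waitcomplete() sleep until some job
 * finishes; an all-zero timespec polls, failing with EWOULDBLOCK when
 * nothing has completed yet.  Typical userland use is a reaper loop
 * (sketch; handle() is a stand-in for caller code):
 *
 *	struct aiocb *donep;
 *	ssize_t n;
 *
 *	while ((n = aio_waitcomplete(&donep, NULL)) != -1)
 *		handle(donep, n);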
*/ error = copyin(uap->timeout, &ts, sizeof(ts)); if (error) return (error); tsp = &ts; } else tsp = NULL; return (kern_aio_waitcomplete(td, uap->aiocbp, tsp, &aiocb_ops)); } static int kern_aio_fsync(struct thread *td, int op, struct aiocb *ujob, struct aiocb_ops *ops) { if (op != O_SYNC) /* XXX lack of O_DSYNC */ return (EINVAL); return (aio_aqueue(td, ujob, NULL, LIO_SYNC, ops)); } int sys_aio_fsync(struct thread *td, struct aio_fsync_args *uap) { return (kern_aio_fsync(td, uap->op, uap->aiocbp, &aiocb_ops)); } /* kqueue attach function */ static int filt_aioattach(struct knote *kn) { struct kaiocb *job; job = (struct kaiocb *)(uintptr_t)kn->kn_sdata; /* * The job pointer must be validated before using it, so * registration is restricted to the kernel; the user cannot * set EV_FLAG1. */ if ((kn->kn_flags & EV_FLAG1) == 0) return (EPERM); kn->kn_ptr.p_aio = job; kn->kn_flags &= ~EV_FLAG1; knlist_add(&job->klist, kn, 0); return (0); } /* kqueue detach function */ static void filt_aiodetach(struct knote *kn) { struct knlist *knl; knl = &kn->kn_ptr.p_aio->klist; knl->kl_lock(knl->kl_lockarg); if (!knlist_empty(knl)) knlist_remove(knl, kn, 1); knl->kl_unlock(knl->kl_lockarg); } /* kqueue filter function */ /*ARGSUSED*/ static int filt_aio(struct knote *kn, long hint) { struct kaiocb *job = kn->kn_ptr.p_aio; kn->kn_data = job->uaiocb._aiocb_private.error; if (!(job->jobflags & KAIOCB_FINISHED)) return (0); kn->kn_flags |= EV_EOF; return (1); } /* kqueue attach function */ static int filt_lioattach(struct knote *kn) { struct aioliojob *lj; lj = (struct aioliojob *)(uintptr_t)kn->kn_sdata; /* * The aioliojob pointer must be validated before using it, so * registration is restricted to the kernel; the user cannot * set EV_FLAG1. */ if ((kn->kn_flags & EV_FLAG1) == 0) return (EPERM); kn->kn_ptr.p_lio = lj; kn->kn_flags &= ~EV_FLAG1; knlist_add(&lj->klist, kn, 0); return (0); } /* kqueue detach function */ static void filt_liodetach(struct knote *kn) { struct knlist *knl; knl = &kn->kn_ptr.p_lio->klist; knl->kl_lock(knl->kl_lockarg); if (!knlist_empty(knl)) knlist_remove(knl, kn, 1); knl->kl_unlock(knl->kl_lockarg); } /* kqueue filter function */ /*ARGSUSED*/ static int filt_lio(struct knote *kn, long hint) { struct aioliojob * lj = kn->kn_ptr.p_lio; return (lj->lioj_flags & LIOJ_KEVENT_POSTED); } #ifdef COMPAT_FREEBSD32 #include <sys/mount.h> #include <sys/socket.h> #include <compat/freebsd32/freebsd32.h> #include <compat/freebsd32/freebsd32_proto.h> #include <compat/freebsd32/freebsd32_signal.h> #include <compat/freebsd32/freebsd32_syscall.h> #include <compat/freebsd32/freebsd32_util.h> struct __aiocb_private32 { int32_t status; int32_t error; uint32_t kernelinfo; }; #ifdef COMPAT_FREEBSD6 typedef struct oaiocb32 { int aio_fildes; /* File descriptor */ uint64_t aio_offset __packed; /* File offset for I/O */ uint32_t aio_buf; /* I/O buffer in process space */ uint32_t aio_nbytes; /* Number of bytes for I/O */ struct osigevent32 aio_sigevent; /* Signal to deliver */ int aio_lio_opcode; /* LIO opcode */ int aio_reqprio; /* Request priority -- ignored */ struct __aiocb_private32 _aiocb_private; } oaiocb32_t; #endif typedef struct aiocb32 { int32_t aio_fildes; /* File descriptor */ uint64_t aio_offset __packed; /* File offset for I/O */ uint32_t aio_buf; /* I/O buffer in process space */ uint32_t aio_nbytes; /* Number of bytes for I/O */ int __spare__[2]; uint32_t __spare2__; int aio_lio_opcode; /* LIO opcode */ int aio_reqprio; /* Request priority -- ignored */ struct __aiocb_private32 _aiocb_private; struct sigevent32 aio_sigevent; /* Signal to deliver */ } aiocb32_t; #ifdef COMPAT_FREEBSD6 static int convert_old_sigevent32(struct osigevent32 *osig, struct sigevent *nsig) { /* * Only SIGEV_NONE,
SIGEV_SIGNAL, and SIGEV_KEVENT are * supported by AIO with the old sigevent structure. */ CP(*osig, *nsig, sigev_notify); switch (nsig->sigev_notify) { case SIGEV_NONE: break; case SIGEV_SIGNAL: nsig->sigev_signo = osig->__sigev_u.__sigev_signo; break; case SIGEV_KEVENT: nsig->sigev_notify_kqueue = osig->__sigev_u.__sigev_notify_kqueue; PTRIN_CP(*osig, *nsig, sigev_value.sival_ptr); break; default: return (EINVAL); } return (0); } static int aiocb32_copyin_old_sigevent(struct aiocb *ujob, struct aiocb *kjob) { struct oaiocb32 job32; int error; bzero(kjob, sizeof(struct aiocb)); error = copyin(ujob, &job32, sizeof(job32)); if (error) return (error); CP(job32, *kjob, aio_fildes); CP(job32, *kjob, aio_offset); PTRIN_CP(job32, *kjob, aio_buf); CP(job32, *kjob, aio_nbytes); CP(job32, *kjob, aio_lio_opcode); CP(job32, *kjob, aio_reqprio); CP(job32, *kjob, _aiocb_private.status); CP(job32, *kjob, _aiocb_private.error); PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo); return (convert_old_sigevent32(&job32.aio_sigevent, &kjob->aio_sigevent)); } #endif static int aiocb32_copyin(struct aiocb *ujob, struct aiocb *kjob) { struct aiocb32 job32; int error; error = copyin(ujob, &job32, sizeof(job32)); if (error) return (error); CP(job32, *kjob, aio_fildes); CP(job32, *kjob, aio_offset); PTRIN_CP(job32, *kjob, aio_buf); CP(job32, *kjob, aio_nbytes); CP(job32, *kjob, aio_lio_opcode); CP(job32, *kjob, aio_reqprio); CP(job32, *kjob, _aiocb_private.status); CP(job32, *kjob, _aiocb_private.error); PTRIN_CP(job32, *kjob, _aiocb_private.kernelinfo); return (convert_sigevent32(&job32.aio_sigevent, &kjob->aio_sigevent)); } static long aiocb32_fetch_status(struct aiocb *ujob) { struct aiocb32 *ujob32; ujob32 = (struct aiocb32 *)ujob; return (fuword32(&ujob32->_aiocb_private.status)); } static long aiocb32_fetch_error(struct aiocb *ujob) { struct aiocb32 *ujob32; ujob32 = (struct aiocb32 *)ujob; return (fuword32(&ujob32->_aiocb_private.error)); } static int aiocb32_store_status(struct aiocb *ujob, long status) { struct aiocb32 *ujob32; ujob32 = (struct aiocb32 *)ujob; return (suword32(&ujob32->_aiocb_private.status, status)); } static int aiocb32_store_error(struct aiocb *ujob, long error) { struct aiocb32 *ujob32; ujob32 = (struct aiocb32 *)ujob; return (suword32(&ujob32->_aiocb_private.error, error)); } static int aiocb32_store_kernelinfo(struct aiocb *ujob, long jobref) { struct aiocb32 *ujob32; ujob32 = (struct aiocb32 *)ujob; return (suword32(&ujob32->_aiocb_private.kernelinfo, jobref)); } static int aiocb32_store_aiocb(struct aiocb **ujobp, struct aiocb *ujob) { return (suword32(ujobp, (long)ujob)); } static struct aiocb_ops aiocb32_ops = { .copyin = aiocb32_copyin, .fetch_status = aiocb32_fetch_status, .fetch_error = aiocb32_fetch_error, .store_status = aiocb32_store_status, .store_error = aiocb32_store_error, .store_kernelinfo = aiocb32_store_kernelinfo, .store_aiocb = aiocb32_store_aiocb, }; #ifdef COMPAT_FREEBSD6 static struct aiocb_ops aiocb32_ops_osigevent = { .copyin = aiocb32_copyin_old_sigevent, .fetch_status = aiocb32_fetch_status, .fetch_error = aiocb32_fetch_error, .store_status = aiocb32_store_status, .store_error = aiocb32_store_error, .store_kernelinfo = aiocb32_store_kernelinfo, .store_aiocb = aiocb32_store_aiocb, }; #endif int freebsd32_aio_return(struct thread *td, struct freebsd32_aio_return_args *uap) { return (kern_aio_return(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops)); } int freebsd32_aio_suspend(struct thread *td, struct freebsd32_aio_suspend_args *uap) { struct timespec32 ts32; 
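	/*
	 * Note the conversion trick below: the user's array of 32-bit
	 * aiocb pointers is copied into the front of the native pointer
	 * array and widened in place; walking from the last element down
	 * keeps each 32-bit source slot intact until it has been read.
	 */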
struct timespec ts, *tsp; struct aiocb **ujoblist; uint32_t *ujoblist32; int error, i; if (uap->nent < 0 || uap->nent > max_aio_queue_per_proc) return (EINVAL); if (uap->timeout) { /* Get timespec struct. */ if ((error = copyin(uap->timeout, &ts32, sizeof(ts32))) != 0) return (error); CP(ts32, ts, tv_sec); CP(ts32, ts, tv_nsec); tsp = &ts; } else tsp = NULL; ujoblist = malloc(uap->nent * sizeof(ujoblist[0]), M_AIOS, M_WAITOK); ujoblist32 = (uint32_t *)ujoblist; error = copyin(uap->aiocbp, ujoblist32, uap->nent * sizeof(ujoblist32[0])); if (error == 0) { for (i = uap->nent - 1; i >= 0; i--) ujoblist[i] = PTRIN(ujoblist32[i]); error = kern_aio_suspend(td, uap->nent, ujoblist, tsp); } free(ujoblist, M_AIOS); return (error); } int freebsd32_aio_error(struct thread *td, struct freebsd32_aio_error_args *uap) { return (kern_aio_error(td, (struct aiocb *)uap->aiocbp, &aiocb32_ops)); } #ifdef COMPAT_FREEBSD6 int freebsd6_freebsd32_aio_read(struct thread *td, struct freebsd6_freebsd32_aio_read_args *uap) { return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ, &aiocb32_ops_osigevent)); } #endif int freebsd32_aio_read(struct thread *td, struct freebsd32_aio_read_args *uap) { return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_READ, &aiocb32_ops)); } #ifdef COMPAT_FREEBSD6 int freebsd6_freebsd32_aio_write(struct thread *td, struct freebsd6_freebsd32_aio_write_args *uap) { return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, &aiocb32_ops_osigevent)); } #endif int freebsd32_aio_write(struct thread *td, struct freebsd32_aio_write_args *uap) { return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_WRITE, &aiocb32_ops)); } int freebsd32_aio_mlock(struct thread *td, struct freebsd32_aio_mlock_args *uap) { return (aio_aqueue(td, (struct aiocb *)uap->aiocbp, NULL, LIO_MLOCK, &aiocb32_ops)); } int freebsd32_aio_waitcomplete(struct thread *td, struct freebsd32_aio_waitcomplete_args *uap) { struct timespec32 ts32; struct timespec ts, *tsp; int error; if (uap->timeout) { /* Get timespec struct. 
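 * As in the native sys_aio_waitcomplete(), the timeout only bounds the
 * sleep; it is converted field by field and never copied back out.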
*/ error = copyin(uap->timeout, &ts32, sizeof(ts32)); if (error) return (error); CP(ts32, ts, tv_sec); CP(ts32, ts, tv_nsec); tsp = &ts; } else tsp = NULL; return (kern_aio_waitcomplete(td, (struct aiocb **)uap->aiocbp, tsp, &aiocb32_ops)); } int freebsd32_aio_fsync(struct thread *td, struct freebsd32_aio_fsync_args *uap) { return (kern_aio_fsync(td, uap->op, (struct aiocb *)uap->aiocbp, &aiocb32_ops)); } #ifdef COMPAT_FREEBSD6 int freebsd6_freebsd32_lio_listio(struct thread *td, struct freebsd6_freebsd32_lio_listio_args *uap) { struct aiocb **acb_list; struct sigevent *sigp, sig; struct osigevent32 osig; uint32_t *acb_list32; int error, i, nent; if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) return (EINVAL); nent = uap->nent; if (nent < 0 || nent > max_aio_queue_per_proc) return (EINVAL); if (uap->sig && (uap->mode == LIO_NOWAIT)) { error = copyin(uap->sig, &osig, sizeof(osig)); if (error) return (error); error = convert_old_sigevent32(&osig, &sig); if (error) return (error); sigp = &sig; } else sigp = NULL; acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK); error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t)); if (error) { free(acb_list32, M_LIO); return (error); } acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK); for (i = 0; i < nent; i++) acb_list[i] = PTRIN(acb_list32[i]); free(acb_list32, M_LIO); error = kern_lio_listio(td, uap->mode, (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp, &aiocb32_ops_osigevent); free(acb_list, M_LIO); return (error); } #endif int freebsd32_lio_listio(struct thread *td, struct freebsd32_lio_listio_args *uap) { struct aiocb **acb_list; struct sigevent *sigp, sig; struct sigevent32 sig32; uint32_t *acb_list32; int error, i, nent; if ((uap->mode != LIO_NOWAIT) && (uap->mode != LIO_WAIT)) return (EINVAL); nent = uap->nent; if (nent < 0 || nent > max_aio_queue_per_proc) return (EINVAL); if (uap->sig && (uap->mode == LIO_NOWAIT)) { error = copyin(uap->sig, &sig32, sizeof(sig32)); if (error) return (error); error = convert_sigevent32(&sig32, &sig); if (error) return (error); sigp = &sig; } else sigp = NULL; acb_list32 = malloc(sizeof(uint32_t) * nent, M_LIO, M_WAITOK); error = copyin(uap->acb_list, acb_list32, nent * sizeof(uint32_t)); if (error) { free(acb_list32, M_LIO); return (error); } acb_list = malloc(sizeof(struct aiocb *) * nent, M_LIO, M_WAITOK); for (i = 0; i < nent; i++) acb_list[i] = PTRIN(acb_list32[i]); free(acb_list32, M_LIO); error = kern_lio_listio(td, uap->mode, (struct aiocb * const *)uap->acb_list, acb_list, nent, sigp, &aiocb32_ops); free(acb_list, M_LIO); return (error); } #endif Index: projects/clang700-import/sys/sys/event.h =================================================================== --- projects/clang700-import/sys/sys/event.h (revision 340917) +++ projects/clang700-import/sys/sys/event.h (revision 340918) @@ -1,369 +1,369 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999,2000,2001 Jonathan Lemon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_EVENT_H_ #define _SYS_EVENT_H_ #include <sys/queue.h> #include <sys/_types.h> #define EVFILT_READ (-1) #define EVFILT_WRITE (-2) #define EVFILT_AIO (-3) /* attached to aio requests */ #define EVFILT_VNODE (-4) /* attached to vnodes */ #define EVFILT_PROC (-5) /* attached to struct proc */ #define EVFILT_SIGNAL (-6) /* attached to struct proc */ #define EVFILT_TIMER (-7) /* timers */ #define EVFILT_PROCDESC (-8) /* attached to process descriptors */ #define EVFILT_FS (-9) /* filesystem events */ #define EVFILT_LIO (-10) /* attached to lio requests */ #define EVFILT_USER (-11) /* User events */ #define EVFILT_SENDFILE (-12) /* attached to sendfile requests */ #define EVFILT_EMPTY (-13) /* empty send socket buf */ #define EVFILT_SYSCOUNT 13 #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define EV_SET(kevp_, a, b, c, d, e, f) do { \ *(kevp_) = (struct kevent){ \ .ident = (a), \ .filter = (b), \ .flags = (c), \ .fflags = (d), \ .data = (e), \ .udata = (f), \ .ext = {0}, \ }; \ } while(0) #else /* Pre-C99 or not STDC (e.g., C++) */ /* The definition of the local variable kevp could possibly conflict * with a user-defined value passed in parameters a-f. */ #define EV_SET(kevp_, a, b, c, d, e, f) do { \ struct kevent *kevp = (kevp_); \ (kevp)->ident = (a); \ (kevp)->filter = (b); \ (kevp)->flags = (c); \ (kevp)->fflags = (d); \ (kevp)->data = (e); \ (kevp)->udata = (f); \ (kevp)->ext[0] = 0; \ (kevp)->ext[1] = 0; \ (kevp)->ext[2] = 0; \ (kevp)->ext[3] = 0; \ } while(0) #endif struct kevent { __uintptr_t ident; /* identifier for this event */ short filter; /* filter for event */ unsigned short flags; /* action flags for kqueue */ unsigned int fflags; /* filter flag value */ __int64_t data; /* filter data value */ void *udata; /* opaque user data identifier */ __uint64_t ext[4]; /* extensions */ }; #if defined(_WANT_FREEBSD11_KEVENT) /* Older structure used in FreeBSD 11.x and older.
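 * It lacks the ext[] extension words and carries a pointer-sized data
 * field, so events from 11.x binaries are converted at the syscall
 * boundary.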
*/ struct kevent_freebsd11 { __uintptr_t ident; /* identifier for this event */ short filter; /* filter for event */ unsigned short flags; unsigned int fflags; __intptr_t data; void *udata; /* opaque user data identifier */ }; #endif #if defined(_WANT_KEVENT32) || (defined(_KERNEL) && defined(__LP64__)) struct kevent32 { uint32_t ident; /* identifier for this event */ short filter; /* filter for event */ u_short flags; u_int fflags; #ifndef __amd64__ uint32_t pad0; #endif int32_t data1, data2; uint32_t udata; /* opaque user data identifier */ #ifndef __amd64__ uint32_t pad1; #endif uint32_t ext64[8]; }; #ifdef _WANT_FREEBSD11_KEVENT struct kevent32_freebsd11 { u_int32_t ident; /* identifier for this event */ short filter; /* filter for event */ u_short flags; u_int fflags; int32_t data; u_int32_t udata; /* opaque user data identifier */ }; #endif #endif /* actions */ #define EV_ADD 0x0001 /* add event to kq (implies enable) */ #define EV_DELETE 0x0002 /* delete event from kq */ #define EV_ENABLE 0x0004 /* enable event */ #define EV_DISABLE 0x0008 /* disable event (not reported) */ #define EV_FORCEONESHOT 0x0100 /* enable _ONESHOT and force trigger */ /* flags */ #define EV_ONESHOT 0x0010 /* only report one occurrence */ #define EV_CLEAR 0x0020 /* clear event state after reporting */ #define EV_RECEIPT 0x0040 /* force EV_ERROR on success, data=0 */ #define EV_DISPATCH 0x0080 /* disable event after reporting */ #define EV_SYSFLAGS 0xF000 /* reserved by system */ #define EV_DROP 0x1000 /* note should be dropped */ #define EV_FLAG1 0x2000 /* filter-specific flag */ #define EV_FLAG2 0x4000 /* filter-specific flag */ /* returned values */ #define EV_EOF 0x8000 /* EOF detected */ #define EV_ERROR 0x4000 /* error, data contains errno */ /* * data/hint flags/masks for EVFILT_USER, shared with userspace * * On input, the top two bits of fflags specifies how the lower twenty four * bits should be applied to the stored value of fflags. * * On output, the top two bits will always be set to NOTE_FFNOP and the * remaining twenty four bits will contain the stored fflags value. */ #define NOTE_FFNOP 0x00000000 /* ignore input fflags */ #define NOTE_FFAND 0x40000000 /* AND fflags */ #define NOTE_FFOR 0x80000000 /* OR fflags */ #define NOTE_FFCOPY 0xc0000000 /* copy fflags */ #define NOTE_FFCTRLMASK 0xc0000000 /* masks for operations */ #define NOTE_FFLAGSMASK 0x00ffffff #define NOTE_TRIGGER 0x01000000 /* Cause the event to be triggered for output. 
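 * A userland sketch of the round trip (the ident value 1 and the kq
 * descriptor are assumed to exist; error checks elided):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);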
*/ /* * data/hint flags for EVFILT_{READ|WRITE}, shared with userspace */ #define NOTE_LOWAT 0x0001 /* low water mark */ #define NOTE_FILE_POLL 0x0002 /* behave like poll() */ /* * data/hint flags for EVFILT_VNODE, shared with userspace */ #define NOTE_DELETE 0x0001 /* vnode was removed */ #define NOTE_WRITE 0x0002 /* data contents changed */ #define NOTE_EXTEND 0x0004 /* size increased */ #define NOTE_ATTRIB 0x0008 /* attributes changed */ #define NOTE_LINK 0x0010 /* link count changed */ #define NOTE_RENAME 0x0020 /* vnode was renamed */ #define NOTE_REVOKE 0x0040 /* vnode access was revoked */ #define NOTE_OPEN 0x0080 /* vnode was opened */ #define NOTE_CLOSE 0x0100 /* file closed, fd did not allow write */ #define NOTE_CLOSE_WRITE 0x0200 /* file closed, fd did allow write */ #define NOTE_READ 0x0400 /* file was read */ /* * data/hint flags for EVFILT_PROC and EVFILT_PROCDESC, shared with userspace */ #define NOTE_EXIT 0x80000000 /* process exited */ #define NOTE_FORK 0x40000000 /* process forked */ #define NOTE_EXEC 0x20000000 /* process exec'd */ #define NOTE_PCTRLMASK 0xf0000000 /* mask for hint bits */ #define NOTE_PDATAMASK 0x000fffff /* mask for pid */ /* additional flags for EVFILT_PROC */ #define NOTE_TRACK 0x00000001 /* follow across forks */ #define NOTE_TRACKERR 0x00000002 /* could not track child */ #define NOTE_CHILD 0x00000004 /* am a child process */ /* additional flags for EVFILT_TIMER */ #define NOTE_SECONDS 0x00000001 /* data is seconds */ #define NOTE_MSECONDS 0x00000002 /* data is milliseconds */ #define NOTE_USECONDS 0x00000004 /* data is microseconds */ #define NOTE_NSECONDS 0x00000008 /* data is nanoseconds */ #define NOTE_ABSTIME 0x00000010 /* timeout is absolute */ struct knote; SLIST_HEAD(klist, knote); struct kqueue; TAILQ_HEAD(kqlist, kqueue); struct knlist { struct klist kl_list; void (*kl_lock)(void *); /* lock function */ void (*kl_unlock)(void *); void (*kl_assert_locked)(void *); void (*kl_assert_unlocked)(void *); void *kl_lockarg; /* argument passed to lock functions */ int kl_autodestroy; }; #ifdef _KERNEL /* * Flags for knote call */ #define KNF_LISTLOCKED 0x0001 /* knlist is locked */ #define KNF_NOKQLOCK 0x0002 /* do not keep KQ_LOCK */ #define KNOTE(list, hint, flags) knote(list, hint, flags) #define KNOTE_LOCKED(list, hint) knote(list, hint, KNF_LISTLOCKED) #define KNOTE_UNLOCKED(list, hint) knote(list, hint, 0) #define KNLIST_EMPTY(list) SLIST_EMPTY(&(list)->kl_list) /* * Flag indicating hint is a signal. Used by EVFILT_SIGNAL, and also * shared by EVFILT_PROC (all knotes attached to p->p_klist) */ #define NOTE_SIGNAL 0x08000000 /* * Hint values for the optional f_touch event filter. If f_touch is not set * to NULL and f_isfd is zero the f_touch filter will be called with the type * argument set to EVENT_REGISTER during a kevent() system call. It is also * called under the same conditions with the type argument set to EVENT_PROCESS * when the event has been triggered. */ #define EVENT_REGISTER 1 #define EVENT_PROCESS 2 struct filterops { int f_isfd; /* true if ident == filedescriptor */ int (*f_attach)(struct knote *kn); void (*f_detach)(struct knote *kn); int (*f_event)(struct knote *kn, long hint); void (*f_touch)(struct knote *kn, struct kevent *kev, u_long type); }; /* * An in-flux knote cannot be dropped from its kq while the kq is * unlocked. If the KN_SCAN flag is not set, a thread can only set * kn_influx when it is exclusive owner of the knote state, and can * modify kn_status as if it had the KQ lock.
KN_SCAN must not be set * on a knote which is already in flux. * * kn_sfflags, kn_sdata, and kn_kevent are protected by the knlist lock. */ struct knote { SLIST_ENTRY(knote) kn_link; /* for kq */ SLIST_ENTRY(knote) kn_selnext; /* for struct selinfo */ struct knlist *kn_knlist; /* f_attach populated */ TAILQ_ENTRY(knote) kn_tqe; struct kqueue *kn_kq; /* which queue we are on */ struct kevent kn_kevent; void *kn_hook; int kn_hookid; int kn_status; /* protected by kq lock */ #define KN_ACTIVE 0x01 /* event has been triggered */ #define KN_QUEUED 0x02 /* event is on queue */ #define KN_DISABLED 0x04 /* event is disabled */ #define KN_DETACHED 0x08 /* knote is detached */ #define KN_MARKER 0x20 /* ignore this knote */ #define KN_KQUEUE 0x40 /* this knote belongs to a kq */ #define KN_SCAN 0x100 /* flux set in kqueue_scan() */ int kn_influx; int kn_sfflags; /* saved filter flags */ int64_t kn_sdata; /* saved data field */ union { struct file *p_fp; /* file data pointer */ struct proc *p_proc; /* proc pointer */ struct kaiocb *p_aio; /* AIO job pointer */ struct aioliojob *p_lio; /* LIO job pointer */ void *p_v; /* generic other pointer */ } kn_ptr; struct filterops *kn_fop; #define kn_id kn_kevent.ident #define kn_filter kn_kevent.filter #define kn_flags kn_kevent.flags #define kn_fflags kn_kevent.fflags #define kn_data kn_kevent.data #define kn_fp kn_ptr.p_fp }; struct kevent_copyops { void *arg; int (*k_copyout)(void *arg, struct kevent *kevp, int count); int (*k_copyin)(void *arg, struct kevent *kevp, int count); size_t kevent_size; }; struct thread; struct proc; struct knlist; struct mtx; struct rwlock; void knote(struct knlist *list, long hint, int lockflags); void knote_fork(struct knlist *list, int pid); struct knlist *knlist_alloc(struct mtx *lock); void knlist_detach(struct knlist *knl); void knlist_add(struct knlist *knl, struct knote *kn, int islocked); void knlist_remove(struct knlist *knl, struct knote *kn, int islocked); int knlist_empty(struct knlist *knl); void knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), void (*kl_unlock)(void *), void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *)); void knlist_init_mtx(struct knlist *knl, struct mtx *lock); void knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock); void knlist_destroy(struct knlist *knl); void knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn); #define knlist_clear(knl, islocked) \ knlist_cleardel((knl), NULL, (islocked), 0) #define knlist_delete(knl, td, islocked) \ knlist_cleardel((knl), (td), (islocked), 1) void knote_fdclose(struct thread *p, int fd); int kqfd_register(int fd, struct kevent *kev, struct thread *p, - int waitok); + int mflag); int kqueue_add_filteropts(int filt, struct filterops *filtops); int kqueue_del_filteropts(int filt); #else /* !_KERNEL */ #include <sys/cdefs.h> struct timespec; __BEGIN_DECLS int kqueue(void); int kevent(int kq, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); __END_DECLS #endif /* !_KERNEL */ #endif /* !_SYS_EVENT_H_ */ Index: projects/clang700-import =================================================================== --- projects/clang700-import (revision 340917) +++ projects/clang700-import (revision 340918) Property changes on: projects/clang700-import ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r340869-340917