diff --git a/sys/compat/linuxkpi/common/include/linux/completion.h b/sys/compat/linuxkpi/common/include/linux/completion.h index 26e41a51c10b..9f8bebb4cf82 100644 --- a/sys/compat/linuxkpi/common/include/linux/completion.h +++ b/sys/compat/linuxkpi/common/include/linux/completion.h @@ -1,67 +1,68 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _LINUXKPI_LINUX_COMPLETION_H_ #define _LINUXKPI_LINUX_COMPLETION_H_ #include struct completion { unsigned int done; }; #define INIT_COMPLETION(c) \ ((c).done = 0) #define init_completion(c) \ do { (c)->done = 0; } while (0) #define reinit_completion(c) \ do { (c)->done = 0; } while (0) #define complete(c) \ linux_complete_common((c), 0) #define complete_all(c) \ linux_complete_common((c), 1) #define wait_for_completion(c) \ linux_wait_for_common((c), 0) #define wait_for_completion_interruptible(c) \ linux_wait_for_common((c), 1) #define wait_for_completion_timeout(c, timeout) \ linux_wait_for_timeout_common((c), (timeout), 0) #define wait_for_completion_interruptible_timeout(c, timeout) \ linux_wait_for_timeout_common((c), (timeout), 1) #define try_wait_for_completion(c) \ linux_try_wait_for_completion(c) #define completion_done(c) \ linux_completion_done(c) extern void linux_complete_common(struct completion *, int); extern int linux_wait_for_common(struct completion *, int); -extern int linux_wait_for_timeout_common(struct completion *, int, int); +extern unsigned long linux_wait_for_timeout_common(struct completion *, + unsigned long, int); extern int linux_try_wait_for_completion(struct completion *); extern int linux_completion_done(struct completion *); #endif /* _LINUXKPI_LINUX_COMPLETION_H_ */ diff --git a/sys/compat/linuxkpi/common/include/linux/jiffies.h b/sys/compat/linuxkpi/common/include/linux/jiffies.h index f099caa1ce18..df6ca129b37c 100644 --- a/sys/compat/linuxkpi/common/include/linux/jiffies.h +++ b/sys/compat/linuxkpi/common/include/linux/jiffies.h @@ -1,153 +1,154 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _LINUXKPI_LINUX_JIFFIES_H_ #define _LINUXKPI_LINUX_JIFFIES_H_ #include #include -#include #include #include +#include -#define jiffies ticks -#define jiffies_64 ticks +extern unsigned long jiffies; /* defined in sys/kern/subr_ticks.S */ +#define jiffies_64 jiffies /* XXX-MJ wrong on 32-bit platforms */ #define jiffies_to_msecs(x) ((unsigned int)(((int64_t)(int)(x)) * 1000 / hz)) -#define MAX_JIFFY_OFFSET ((INT_MAX >> 1) - 1) +#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1) - 1) -#define time_after(a, b) ((int)((b) - (a)) < 0) +#define time_after(a, b) ((long)((b) - (a)) < 0) #define time_after32(a, b) ((int32_t)((uint32_t)(b) - (uint32_t)(a)) < 0) #define time_before(a, b) time_after(b,a) #define time_before32(a, b) time_after32(b, a) -#define time_after_eq(a, b) ((int)((a) - (b)) >= 0) +#define time_after_eq(a, b) ((long)((a) - (b)) >= 0) #define time_before_eq(a, b) time_after_eq(b, a) #define time_in_range(a,b,c) \ (time_after_eq(a,b) && time_before_eq(a,c)) #define time_is_after_eq_jiffies(a) time_after_eq(a, jiffies) #define time_is_after_jiffies(a) time_after(a, jiffies) #define time_is_before_jiffies(a) time_before(a, jiffies) #define HZ hz extern uint64_t lkpi_nsec2hz_rem; extern uint64_t lkpi_nsec2hz_div; extern uint64_t lkpi_nsec2hz_max; extern uint64_t lkpi_usec2hz_rem; extern uint64_t lkpi_usec2hz_div; extern uint64_t lkpi_usec2hz_max; extern uint64_t lkpi_msec2hz_rem; extern uint64_t lkpi_msec2hz_div; extern uint64_t lkpi_msec2hz_max; -static inline int +static inline unsigned long msecs_to_jiffies(uint64_t msec) { uint64_t result; if (msec > lkpi_msec2hz_max) msec = lkpi_msec2hz_max; result = howmany(msec * lkpi_msec2hz_rem, lkpi_msec2hz_div); if (result > MAX_JIFFY_OFFSET) result = MAX_JIFFY_OFFSET; - return ((int)result); + return ((unsigned long)result); } -static inline int +static inline unsigned long usecs_to_jiffies(uint64_t usec) { uint64_t result; if (usec > lkpi_usec2hz_max) usec = lkpi_usec2hz_max; result = howmany(usec * lkpi_usec2hz_rem, lkpi_usec2hz_div); if (result > MAX_JIFFY_OFFSET) result = MAX_JIFFY_OFFSET; - return ((int)result); + return ((unsigned long)result); } static inline uint64_t nsecs_to_jiffies64(uint64_t nsec) { if (nsec > lkpi_nsec2hz_max) nsec = lkpi_nsec2hz_max; return (howmany(nsec * lkpi_nsec2hz_rem, lkpi_nsec2hz_div)); } 
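/*
 * Illustrative sketch (not part of this diff): with jiffies widened to
 * "unsigned long", relative deadlines are built from the conversion
 * helpers above and compared with the wrap-safe time_after() macro.
 * The variables below (deadline, work_done) are hypothetical:
 *
 *	unsigned long deadline = jiffies + msecs_to_jiffies(100);
 *
 *	while (!work_done) {
 *		if (time_after(jiffies, deadline))
 *			break;	/- 100ms elapsed, safe across wrap -/
 *		cpu_spinwait();
 *	}
 */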
static inline unsigned long nsecs_to_jiffies(uint64_t nsec) { if (sizeof(unsigned long) >= sizeof(uint64_t)) { if (nsec > lkpi_nsec2hz_max) nsec = lkpi_nsec2hz_max; } else { if (nsec > (lkpi_nsec2hz_max >> 32)) nsec = (lkpi_nsec2hz_max >> 32); } return (howmany(nsec * lkpi_nsec2hz_rem, lkpi_nsec2hz_div)); } static inline uint64_t -jiffies_to_nsecs(int j) +jiffies_to_nsecs(unsigned long j) { - return ((1000000000ULL / hz) * (uint64_t)(unsigned int)j); + return ((1000000000ULL / hz) * (uint64_t)j); } static inline uint64_t -jiffies_to_usecs(int j) +jiffies_to_usecs(unsigned long j) { - return ((1000000ULL / hz) * (uint64_t)(unsigned int)j); + return ((1000000ULL / hz) * (uint64_t)j); } static inline uint64_t get_jiffies_64(void) { - return ((uint64_t)(unsigned int)ticks); + return ((uint64_t)jiffies); } -static inline int -linux_timer_jiffies_until(int expires) +static inline unsigned long +linux_timer_jiffies_until(unsigned long expires) { - int delta = expires - jiffies; + unsigned long delta = expires - jiffies; + /* guard against already expired values */ - if (delta < 1) + if ((long)delta < 1) delta = 1; return (delta); } #endif /* _LINUXKPI_LINUX_JIFFIES_H_ */ diff --git a/sys/compat/linuxkpi/common/include/linux/sched.h b/sys/compat/linuxkpi/common/include/linux/sched.h index 80354493f955..3ad2f8e4ce8b 100644 --- a/sys/compat/linuxkpi/common/include/linux/sched.h +++ b/sys/compat/linuxkpi/common/include/linux/sched.h @@ -1,243 +1,243 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2018 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _LINUXKPI_LINUX_SCHED_H_ #define _LINUXKPI_LINUX_SCHED_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#define MAX_SCHEDULE_TIMEOUT INT_MAX +#define MAX_SCHEDULE_TIMEOUT LONG_MAX #define TASK_RUNNING 0x0000 #define TASK_INTERRUPTIBLE 0x0001 #define TASK_UNINTERRUPTIBLE 0x0002 #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) #define TASK_WAKING 0x0100 #define TASK_PARKED 0x0200 #define TASK_COMM_LEN (MAXCOMLEN + 1) struct seq_file; struct work_struct; struct task_struct { struct thread *task_thread; struct mm_struct *mm; linux_task_fn_t *task_fn; void *task_data; int task_ret; atomic_t usage; atomic_t state; atomic_t kthread_flags; pid_t pid; /* BSD thread ID */ const char *comm; void *bsd_ioctl_data; unsigned bsd_ioctl_len; struct completion parked; struct completion exited; #define TS_RCU_TYPE_MAX 2 TAILQ_ENTRY(task_struct) rcu_entry[TS_RCU_TYPE_MAX]; int rcu_recurse[TS_RCU_TYPE_MAX]; int bsd_interrupt_value; struct work_struct *work; /* current work struct, if set */ struct task_struct *group_leader; unsigned rcu_section[TS_RCU_TYPE_MAX]; unsigned int fpu_ctx_level; }; #define current ({ \ struct thread *__td = curthread; \ linux_set_current(__td); \ ((struct task_struct *)__td->td_lkpi_task); \ }) #define task_pid_group_leader(task) (task)->task_thread->td_proc->p_pid #define task_pid(task) ((task)->pid) #define task_pid_nr(task) ((task)->pid) #define task_pid_vnr(task) ((task)->pid) #define get_pid(x) (x) #define put_pid(x) do { } while (0) #define current_euid() (curthread->td_ucred->cr_uid) #define task_euid(task) ((task)->task_thread->td_ucred->cr_uid) #define get_task_state(task) atomic_read(&(task)->state) #define set_task_state(task, x) atomic_set(&(task)->state, (x)) #define __set_task_state(task, x) ((task)->state.counter = (x)) #define set_current_state(x) set_task_state(current, x) #define __set_current_state(x) __set_task_state(current, x) static inline void get_task_struct(struct task_struct *task) { atomic_inc(&task->usage); } static inline void put_task_struct(struct task_struct *task) { if (atomic_dec_and_test(&task->usage)) linux_free_current(task); } #define cond_resched() do { if (!cold) sched_relinquish(curthread); } while (0) #define yield() kern_yield(PRI_UNCHANGED) #define sched_yield() sched_relinquish(curthread) #define need_resched() (curthread->td_owepreempt || \ td_ast_pending(curthread, TDA_SCHED)) static inline int cond_resched_lock(spinlock_t *lock) { if (need_resched() == 0) return (0); spin_unlock(lock); cond_resched(); spin_lock(lock); return (1); } bool linux_signal_pending(struct task_struct *task); bool linux_fatal_signal_pending(struct task_struct *task); bool linux_signal_pending_state(long state, struct task_struct *task); void linux_send_sig(int signo, struct task_struct *task); #define signal_pending(task) linux_signal_pending(task) #define fatal_signal_pending(task) linux_fatal_signal_pending(task) #define signal_pending_state(state, task) \ linux_signal_pending_state(state, task) #define send_sig(signo, task, priv) do { \ CTASSERT((priv) == 0); \ linux_send_sig(signo, task); \ } while (0) -int linux_schedule_timeout(int timeout); +long linux_schedule_timeout(long timeout); static inline void linux_schedule_save_interrupt_value(struct task_struct *task, int value) { task->bsd_interrupt_value = value; } bool linux_task_exiting(struct task_struct *task); #define current_exiting() \ 
linux_task_exiting(current) static inline int linux_schedule_get_interrupt_value(struct task_struct *task) { int value = task->bsd_interrupt_value; task->bsd_interrupt_value = 0; return (value); } static inline void schedule(void) { (void)linux_schedule_timeout(MAX_SCHEDULE_TIMEOUT); } #define schedule_timeout(timeout) \ linux_schedule_timeout(timeout) #define schedule_timeout_killable(timeout) \ schedule_timeout_interruptible(timeout) #define schedule_timeout_interruptible(timeout) ({ \ set_current_state(TASK_INTERRUPTIBLE); \ schedule_timeout(timeout); \ }) #define schedule_timeout_uninterruptible(timeout) ({ \ set_current_state(TASK_UNINTERRUPTIBLE); \ schedule_timeout(timeout); \ }) #define io_schedule() schedule() #define io_schedule_timeout(timeout) schedule_timeout(timeout) static inline uint64_t local_clock(void) { struct timespec ts; nanotime(&ts); return ((uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec); } static inline const char * get_task_comm(char *buf, struct task_struct *task) { buf[0] = 0; /* buffer is too small */ return (task->comm); } static inline void sched_set_fifo(struct task_struct *t) { struct rtprio rtp; rtp.prio = (RTP_PRIO_MIN + RTP_PRIO_MAX) / 2; rtp.type = RTP_PRIO_FIFO; rtp_to_pri(&rtp, t->task_thread); } static inline void sched_set_fifo_low(struct task_struct *t) { struct rtprio rtp; rtp.prio = RTP_PRIO_MAX; /* lowest priority */ rtp.type = RTP_PRIO_FIFO; rtp_to_pri(&rtp, t->task_thread); } #endif /* _LINUXKPI_LINUX_SCHED_H_ */ diff --git a/sys/compat/linuxkpi/common/include/linux/timer.h b/sys/compat/linuxkpi/common/include/linux/timer.h index 8bea082c3e6c..a635f0faea59 100644 --- a/sys/compat/linuxkpi/common/include/linux/timer.h +++ b/sys/compat/linuxkpi/common/include/linux/timer.h @@ -1,94 +1,94 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef _LINUXKPI_LINUX_TIMER_H_ #define _LINUXKPI_LINUX_TIMER_H_ #include #include #include #include struct timer_list { struct callout callout; union { void (*function) (unsigned long); /* < v4.15 */ void (*function_415) (struct timer_list *); }; unsigned long data; - int expires; + unsigned long expires; }; extern unsigned long linux_timer_hz_mask; #define TIMER_IRQSAFE 0x0001 #define from_timer(var, arg, field) \ container_of(arg, typeof(*(var)), field) #define timer_setup(timer, func, flags) do { \ CTASSERT(((flags) & ~TIMER_IRQSAFE) == 0); \ (timer)->function_415 = (func); \ (timer)->data = (unsigned long)(timer); \ callout_init(&(timer)->callout, 1); \ } while (0) #define setup_timer(timer, func, dat) do { \ (timer)->function = (func); \ (timer)->data = (dat); \ callout_init(&(timer)->callout, 1); \ } while (0) #define __setup_timer(timer, func, dat, flags) do { \ CTASSERT(((flags) & ~TIMER_IRQSAFE) == 0); \ setup_timer(timer, func, dat); \ } while (0) #define init_timer(timer) do { \ (timer)->function = NULL; \ (timer)->data = 0; \ callout_init(&(timer)->callout, 1); \ } while (0) -extern int mod_timer(struct timer_list *, int); +extern int mod_timer(struct timer_list *, unsigned long); extern void add_timer(struct timer_list *); extern void add_timer_on(struct timer_list *, int cpu); extern int del_timer(struct timer_list *); extern int del_timer_sync(struct timer_list *); extern int timer_delete_sync(struct timer_list *); extern int timer_shutdown_sync(struct timer_list *); #define timer_pending(timer) callout_pending(&(timer)->callout) #define round_jiffies(j) \ - ((int)(((j) + linux_timer_hz_mask) & ~linux_timer_hz_mask)) + ((unsigned long)(((j) + linux_timer_hz_mask) & ~linux_timer_hz_mask)) #define round_jiffies_relative(j) round_jiffies(j) #define round_jiffies_up(j) round_jiffies(j) #define round_jiffies_up_relative(j) round_jiffies_up(j) #endif /* _LINUXKPI_LINUX_TIMER_H_ */ diff --git a/sys/compat/linuxkpi/common/include/linux/wait.h b/sys/compat/linuxkpi/common/include/linux/wait.h index 309c7816aa7b..bd496793e27e 100644 --- a/sys/compat/linuxkpi/common/include/linux/wait.h +++ b/sys/compat/linuxkpi/common/include/linux/wait.h @@ -1,311 +1,311 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * Copyright (c) 2017 Mark Johnston * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _LINUXKPI_LINUX_WAIT_H_ #define _LINUXKPI_LINUX_WAIT_H_ #include #include #include #include #include #include #include #define SKIP_SLEEP() (SCHEDULER_STOPPED() || kdb_active) #define might_sleep() \ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()") #define might_sleep_if(cond) do { \ if (cond) { might_sleep(); } \ } while (0) struct wait_queue; struct wait_queue_head; #define wait_queue_entry wait_queue typedef struct wait_queue wait_queue_t; typedef struct wait_queue_entry wait_queue_entry_t; typedef struct wait_queue_head wait_queue_head_t; typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *); /* * Many API consumers directly reference these fields and those of * wait_queue_head. */ struct wait_queue { unsigned int flags; /* always 0 */ void *private; wait_queue_func_t *func; union { struct list_head task_list; /* < v4.13 */ struct list_head entry; /* >= v4.13 */ }; }; struct wait_queue_head { spinlock_t lock; union { struct list_head task_list; /* < v4.13 */ struct list_head head; /* >= v4.13 */ }; }; /* * This function is referenced by at least one DRM driver, so it may not be * renamed and furthermore must be the default wait queue callback. */ extern wait_queue_func_t autoremove_wake_function; extern wait_queue_func_t default_wake_function; #define DEFINE_WAIT_FUNC(name, function) \ wait_queue_t name = { \ .private = current, \ .func = function, \ .task_list = LINUX_LIST_HEAD_INIT(name.task_list) \ } #define DEFINE_WAIT(name) \ DEFINE_WAIT_FUNC(name, autoremove_wake_function) #define DECLARE_WAITQUEUE(name, task) \ wait_queue_t name = { \ .private = task, \ .task_list = LINUX_LIST_HEAD_INIT(name.task_list) \ } #define DECLARE_WAIT_QUEUE_HEAD(name) \ wait_queue_head_t name = { \ .task_list = LINUX_LIST_HEAD_INIT(name.task_list), \ }; \ MTX_SYSINIT(name, &(name).lock, spin_lock_name("wqhead"), MTX_DEF) #define init_waitqueue_head(wqh) do { \ mtx_init(&(wqh)->lock, spin_lock_name("wqhead"), \ NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS); \ INIT_LIST_HEAD(&(wqh)->task_list); \ } while (0) #define __init_waitqueue_head(wqh, name, lk) init_waitqueue_head(wqh) void linux_init_wait_entry(wait_queue_t *, int); void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool); #define init_wait_entry(wq, flags) \ linux_init_wait_entry(wq, flags) #define wake_up(wqh) \ linux_wake_up(wqh, TASK_NORMAL, 1, false) #define wake_up_all(wqh) \ linux_wake_up(wqh, TASK_NORMAL, 0, false) #define wake_up_locked(wqh) \ linux_wake_up(wqh, TASK_NORMAL, 1, true) #define wake_up_all_locked(wqh) \ linux_wake_up(wqh, TASK_NORMAL, 0, true) #define wake_up_interruptible(wqh) \ linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false) #define wake_up_interruptible_all(wqh) \ linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false) -int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, int, +int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, long, unsigned int, spinlock_t *); /* * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if * cond is true after 
timeout, remaining jiffies (> 0) if cond is true before * timeout. */ #define __wait_event_common(wqh, cond, timeout, state, lock) ({ \ DEFINE_WAIT(__wq); \ - const int __timeout = ((int)(timeout)) < 1 ? 1 : (timeout); \ - int __start = ticks; \ - int __ret = 0; \ + const long __timeout = ((long)(timeout)) < 1 ? 1 : (timeout); \ + long __start = jiffies; \ + long __ret = 0; \ \ for (;;) { \ linux_prepare_to_wait(&(wqh), &__wq, state); \ if (cond) \ break; \ __ret = linux_wait_event_common(&(wqh), &__wq, \ __timeout, state, lock); \ if (__ret != 0) \ break; \ } \ linux_finish_wait(&(wqh), &__wq); \ if (__timeout != MAX_SCHEDULE_TIMEOUT) { \ if (__ret == -EWOULDBLOCK) \ __ret = !!(cond); \ else if (__ret != -ERESTARTSYS) { \ - __ret = __timeout + __start - ticks; \ + __ret = __timeout + __start - jiffies; \ /* range check return value */ \ if (__ret < 1) \ __ret = 1; \ else if (__ret > __timeout) \ __ret = __timeout; \ } \ } \ __ret; \ }) #define wait_event(wqh, cond) do { \ (void) __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \ TASK_UNINTERRUPTIBLE, NULL); \ } while (0) #define wait_event_timeout(wqh, cond, timeout) ({ \ __wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE, \ NULL); \ }) #define wait_event_killable(wqh, cond) ({ \ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \ TASK_INTERRUPTIBLE, NULL); \ }) #define wait_event_interruptible(wqh, cond) ({ \ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \ TASK_INTERRUPTIBLE, NULL); \ }) #define wait_event_interruptible_timeout(wqh, cond, timeout) ({ \ __wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE, \ NULL); \ }) /* * Wait queue is already locked. */ #define wait_event_interruptible_locked(wqh, cond) ({ \ int __ret; \ \ spin_unlock(&(wqh).lock); \ __ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \ TASK_INTERRUPTIBLE, NULL); \ spin_lock(&(wqh).lock); \ __ret; \ }) /* * The passed spinlock is held when testing the condition. */ #define wait_event_interruptible_lock_irq(wqh, cond, lock) ({ \ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \ TASK_INTERRUPTIBLE, &(lock)); \ }) /* * The passed spinlock is held when testing the condition. 
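 *
 * Illustrative usage (hypothetical driver code, not part of this diff):
 * the caller holds the spinlock on entry and holds it again on return:
 *
 *	spin_lock_irq(&priv->lock);
 *	wait_event_lock_irq(priv->wqh, priv->ready != 0, priv->lock);
 *	spin_unlock_irq(&priv->lock);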
*/ #define wait_event_lock_irq(wqh, cond, lock) ({ \ __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT, \ TASK_UNINTERRUPTIBLE, &(lock)); \ }) static inline void __add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq) { list_add(&wq->task_list, &wqh->task_list); } static inline void add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq) { spin_lock(&wqh->lock); __add_wait_queue(wqh, wq); spin_unlock(&wqh->lock); } static inline void __add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq) { list_add_tail(&wq->task_list, &wqh->task_list); } static inline void __add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wq) { list_add_tail(&wq->entry, &wqh->head); } static inline void __remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq) { list_del(&wq->task_list); } static inline void remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq) { spin_lock(&wqh->lock); __remove_wait_queue(wqh, wq); spin_unlock(&wqh->lock); } bool linux_waitqueue_active(wait_queue_head_t *); #define waitqueue_active(wqh) linux_waitqueue_active(wqh) void linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int); void linux_finish_wait(wait_queue_head_t *, wait_queue_t *); #define prepare_to_wait(wqh, wq, state) linux_prepare_to_wait(wqh, wq, state) #define finish_wait(wqh, wq) linux_finish_wait(wqh, wq) void linux_wake_up_bit(void *, int); -int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, int); +int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, long); void linux_wake_up_atomic_t(atomic_t *); int linux_wait_on_atomic_t(atomic_t *, unsigned int); #define wake_up_bit(word, bit) linux_wake_up_bit(word, bit) #define wait_on_bit(word, bit, state) \ linux_wait_on_bit_timeout(word, bit, state, MAX_SCHEDULE_TIMEOUT) #define wait_on_bit_timeout(word, bit, state, timeout) \ linux_wait_on_bit_timeout(word, bit, state, timeout) #define wake_up_atomic_t(a) linux_wake_up_atomic_t(a) /* * All existing callers have a cb that just schedule()s. To avoid adding * complexity, just emulate that internally. The prototype is different so that * callers must be manually modified; a cb that does something other than call * schedule() will require special treatment. */ #define wait_on_atomic_t(a, state) linux_wait_on_atomic_t(a, state) struct task_struct; bool linux_wake_up_state(struct task_struct *, unsigned int); #define wake_up_process(task) linux_wake_up_state(task, TASK_NORMAL) #define wake_up_state(task, state) linux_wake_up_state(task, state) #endif /* _LINUXKPI_LINUX_WAIT_H_ */ diff --git a/sys/compat/linuxkpi/common/include/linux/workqueue.h b/sys/compat/linuxkpi/common/include/linux/workqueue.h index 7e740f0f1dfc..25ee861d3015 100644 --- a/sys/compat/linuxkpi/common/include/linux/workqueue.h +++ b/sys/compat/linuxkpi/common/include/linux/workqueue.h @@ -1,267 +1,267 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2017 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _LINUXKPI_LINUX_WORKQUEUE_H_ #define _LINUXKPI_LINUX_WORKQUEUE_H_ #include #include #include #include #include #include #include #include #include #define WORK_CPU_UNBOUND MAXCPU #define WQ_UNBOUND (1 << 0) #define WQ_HIGHPRI (1 << 1) struct work_struct; typedef void (*work_func_t)(struct work_struct *); struct work_exec { TAILQ_ENTRY(work_exec) entry; struct work_struct *target; }; struct workqueue_struct { struct taskqueue *taskqueue; struct mtx exec_mtx; TAILQ_HEAD(, work_exec) exec_head; atomic_t draining; }; #define WQ_EXEC_LOCK(wq) mtx_lock(&(wq)->exec_mtx) #define WQ_EXEC_UNLOCK(wq) mtx_unlock(&(wq)->exec_mtx) struct work_struct { struct task work_task; struct workqueue_struct *work_queue; work_func_t func; atomic_t state; }; struct rcu_work { struct work_struct work; struct rcu_head rcu; struct workqueue_struct *wq; }; #define DECLARE_WORK(name, fn) \ struct work_struct name; \ static void name##_init(void *arg) \ { \ INIT_WORK(&name, fn); \ } \ SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, name##_init, NULL) struct delayed_work { struct work_struct work; struct { struct callout callout; struct mtx mtx; - int expires; + long expires; } timer; }; #define DECLARE_DELAYED_WORK(name, fn) \ struct delayed_work name; \ static void __linux_delayed_ ## name ## _init(void *arg) \ { \ linux_init_delayed_work(&name, fn); \ } \ SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, \ __linux_delayed_ ## name##_init, NULL) static inline struct delayed_work * to_delayed_work(struct work_struct *work) { return (container_of(work, struct delayed_work, work)); } #define INIT_WORK(work, fn) \ do { \ (work)->func = (fn); \ (work)->work_queue = NULL; \ atomic_set(&(work)->state, 0); \ TASK_INIT(&(work)->work_task, 0, linux_work_fn, (work)); \ } while (0) #define INIT_RCU_WORK(_work, _fn) \ INIT_WORK(&(_work)->work, (_fn)) #define INIT_WORK_ONSTACK(work, fn) \ INIT_WORK(work, fn) #define INIT_DELAYED_WORK(dwork, fn) \ linux_init_delayed_work(dwork, fn) #define INIT_DELAYED_WORK_ONSTACK(dwork, fn) \ linux_init_delayed_work(dwork, fn) #define INIT_DEFERRABLE_WORK(dwork, fn) \ INIT_DELAYED_WORK(dwork, fn) #define flush_scheduled_work() \ taskqueue_drain_all(system_wq->taskqueue) #define queue_work(wq, work) \ linux_queue_work_on(WORK_CPU_UNBOUND, wq, work) #define schedule_work(work) \ linux_queue_work_on(WORK_CPU_UNBOUND, system_wq, work) #define queue_delayed_work(wq, dwork, delay) \ linux_queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay) #define schedule_delayed_work_on(cpu, dwork, delay) \ linux_queue_delayed_work_on(cpu, system_wq, dwork, delay) #define queue_work_on(cpu, wq, 
work) \ linux_queue_work_on(cpu, wq, work) #define schedule_delayed_work(dwork, delay) \ linux_queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, dwork, delay) #define queue_delayed_work_on(cpu, wq, dwork, delay) \ linux_queue_delayed_work_on(cpu, wq, dwork, delay) #define create_singlethread_workqueue(name) \ linux_create_workqueue_common(name, 1) #define create_workqueue(name) \ linux_create_workqueue_common(name, mp_ncpus) #define alloc_ordered_workqueue(name, flags) \ linux_create_workqueue_common(name, 1) #define alloc_workqueue(name, flags, max_active) \ linux_create_workqueue_common(name, max_active) #define flush_workqueue(wq) \ taskqueue_drain_all((wq)->taskqueue) #define drain_workqueue(wq) do { \ atomic_inc(&(wq)->draining); \ taskqueue_drain_all((wq)->taskqueue); \ atomic_dec(&(wq)->draining); \ } while (0) #define mod_delayed_work(wq, dwork, delay) ({ \ bool __retval; \ __retval = linux_cancel_delayed_work(dwork); \ linux_queue_delayed_work_on(WORK_CPU_UNBOUND, \ wq, dwork, delay); \ __retval; \ }) #define delayed_work_pending(dwork) \ linux_work_pending(&(dwork)->work) #define cancel_work(work) \ linux_cancel_work(work) #define cancel_delayed_work(dwork) \ linux_cancel_delayed_work(dwork) #define cancel_work_sync(work) \ linux_cancel_work_sync(work) #define cancel_delayed_work_sync(dwork) \ linux_cancel_delayed_work_sync(dwork) #define flush_work(work) \ linux_flush_work(work) #define queue_rcu_work(wq, rwork) \ linux_queue_rcu_work(wq, rwork) #define flush_rcu_work(rwork) \ linux_flush_rcu_work(rwork) #define flush_delayed_work(dwork) \ linux_flush_delayed_work(dwork) #define work_pending(work) \ linux_work_pending(work) #define work_busy(work) \ linux_work_busy(work) #define destroy_work_on_stack(work) \ do { } while (0) #define destroy_delayed_work_on_stack(dwork) \ do { } while (0) #define destroy_workqueue(wq) \ linux_destroy_workqueue(wq) #define current_work() \ linux_current_work() /* prototypes */ extern struct workqueue_struct *system_wq; extern struct workqueue_struct *system_long_wq; extern struct workqueue_struct *system_unbound_wq; extern struct workqueue_struct *system_highpri_wq; extern struct workqueue_struct *system_power_efficient_wq; extern void linux_init_delayed_work(struct delayed_work *, work_func_t); extern void linux_work_fn(void *, int); extern void linux_delayed_work_fn(void *, int); extern struct workqueue_struct *linux_create_workqueue_common(const char *, int); extern void linux_destroy_workqueue(struct workqueue_struct *); extern bool linux_queue_work_on(int cpu, struct workqueue_struct *, struct work_struct *); extern bool linux_queue_delayed_work_on(int cpu, struct workqueue_struct *, - struct delayed_work *, unsigned delay); + struct delayed_work *, unsigned long delay); extern bool linux_cancel_work(struct work_struct *); extern bool linux_cancel_delayed_work(struct delayed_work *); extern bool linux_cancel_work_sync(struct work_struct *); extern bool linux_cancel_delayed_work_sync(struct delayed_work *); extern bool linux_flush_work(struct work_struct *); extern bool linux_flush_delayed_work(struct delayed_work *); extern bool linux_work_pending(struct work_struct *); extern bool linux_work_busy(struct work_struct *); extern struct work_struct *linux_current_work(void); extern bool linux_queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork); extern bool linux_flush_rcu_work(struct rcu_work *rwork); static inline bool queue_work_node(int node __unused, struct workqueue_struct *wq, struct work_struct *work) { return 
(queue_work(wq, work)); } #endif /* _LINUXKPI_LINUX_WORKQUEUE_H_ */ diff --git a/sys/compat/linuxkpi/common/src/linux_80211.c b/sys/compat/linuxkpi/common/src/linux_80211.c index 1a8b32bb16f7..357f451a3f1a 100644 --- a/sys/compat/linuxkpi/common/src/linux_80211.c +++ b/sys/compat/linuxkpi/common/src/linux_80211.c @@ -1,8202 +1,8202 @@ /*- * Copyright (c) 2020-2025 The FreeBSD Foundation * Copyright (c) 2020-2025 Bjoern A. Zeeb * * This software was developed by Björn Zeeb under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Public functions are called linuxkpi_*(). * Internal (static) functions are called lkpi_*(). * * The internal structures holding metadata over public structures are also * called lkpi_xxx (usually with a member at the end called xxx). * Note: we do not replicate the structure names but the general variable names * for these (e.g., struct hw -> struct lkpi_hw, struct sta -> struct lkpi_sta). * There are macros to access one from the other. * We call the internal versions lxxx (e.g., hw -> lhw, sta -> lsta). */ /* * TODO: * - lots :) * - HW_CRYPTO: we need a "keystore" and an ordered list for suspend/resume. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define LINUXKPI_NET80211 #include #include #include #include "linux_80211.h" #define LKPI_80211_WME #define LKPI_80211_HW_CRYPTO #define LKPI_80211_HT #define LKPI_80211_VHT #if defined(LKPI_80211_VHT) && !defined(LKPI_80211_HT) #define LKPI_80211_HT #endif #if defined(LKPI_80211_HT) && !defined(LKPI_80211_HW_CRYPTO) #define LKPI_80211_HW_CRYPTO #endif static MALLOC_DEFINE(M_LKPI80211, "lkpi80211", "LinuxKPI 80211 compat"); /* XXX-BZ really want this and others in queue.h */ #define TAILQ_ELEM_INIT(elm, field) do { \ (elm)->field.tqe_next = NULL; \ (elm)->field.tqe_prev = NULL; \ } while (0) /* -------------------------------------------------------------------------- */ SYSCTL_DECL(_compat_linuxkpi); SYSCTL_NODE(_compat_linuxkpi, OID_AUTO, 80211, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "LinuxKPI 802.11 compatibility layer"); #if defined(LKPI_80211_HW_CRYPTO) static bool lkpi_hwcrypto = false; SYSCTL_BOOL(_compat_linuxkpi_80211, OID_AUTO, hw_crypto, CTLFLAG_RDTUN, &lkpi_hwcrypto, 0, "Enable LinuxKPI 802.11 hardware crypto offload"); static bool lkpi_hwcrypto_tkip = false; SYSCTL_BOOL(_compat_linuxkpi_80211, OID_AUTO, tkip, CTLFLAG_RDTUN, &lkpi_hwcrypto_tkip, 0, "Enable LinuxKPI 802.11 TKIP crypto offload"); #endif /* Keep public for as long as header files are using it too. */ int linuxkpi_debug_80211; #ifdef LINUXKPI_DEBUG_80211 SYSCTL_INT(_compat_linuxkpi_80211, OID_AUTO, debug, CTLFLAG_RWTUN, &linuxkpi_debug_80211, 0, "LinuxKPI 802.11 debug level"); #define UNIMPLEMENTED if (linuxkpi_debug_80211 & D80211_TODO) \ printf("XXX-TODO %s:%d: UNIMPLEMENTED\n", __func__, __LINE__) #define TRACEOK() if (linuxkpi_debug_80211 & D80211_TRACEOK) \ printf("XXX-TODO %s:%d: TRACEPOINT\n", __func__, __LINE__) #else #define UNIMPLEMENTED do { } while (0) #define TRACEOK() do { } while (0) #endif /* #define PREP_TX_INFO_DURATION (IEEE80211_TRANS_WAIT * 1000) */ #ifndef PREP_TX_INFO_DURATION #define PREP_TX_INFO_DURATION 0 /* Let the driver do its thing. */ #endif /* This is DSAP | SSAP | CTRL | ProtoID/OrgCode{3}. */ const uint8_t rfc1042_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; /* IEEE 802.11-05/0257r1 */ const uint8_t bridge_tunnel_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; /* IEEE 802.11e Table 20i-UP-to-AC mappings. */ static const uint8_t ieee80211e_up_to_ac[] = { IEEE80211_AC_BE, IEEE80211_AC_BK, IEEE80211_AC_BK, IEEE80211_AC_BE, IEEE80211_AC_VI, IEEE80211_AC_VI, IEEE80211_AC_VO, IEEE80211_AC_VO, #if 0 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */ #endif }; const struct cfg80211_ops linuxkpi_mac80211cfgops = { /* * XXX TODO need a "glue layer" to link cfg80211 ops to * mac80211 and to the driver or net80211. * Can we pass some on 1:1? Need to compare the (*f)(). 
*/ }; #if 0 static struct lkpi_sta *lkpi_find_lsta_by_ni(struct lkpi_vif *, struct ieee80211_node *); #endif static void lkpi_80211_txq_tx_one(struct lkpi_sta *, struct mbuf *); static void lkpi_80211_txq_task(void *, int); static void lkpi_80211_lhw_rxq_task(void *, int); static void lkpi_ieee80211_free_skb_mbuf(void *); #ifdef LKPI_80211_WME static int lkpi_wme_update(struct lkpi_hw *, struct ieee80211vap *, bool); #endif static const char * lkpi_rate_info_bw_to_str(enum rate_info_bw bw) { switch (bw) { case RATE_INFO_BW_20: return ("20"); break; case RATE_INFO_BW_5: return ("5"); break; case RATE_INFO_BW_10: return ("10"); break; case RATE_INFO_BW_40: return ("40"); break; case RATE_INFO_BW_80: return ("80"); break; case RATE_INFO_BW_160: return ("160"); break; case RATE_INFO_BW_HE_RU: IMPROVE("nl80211_he_ru_alloc"); return ("HE_RU"); break; case RATE_INFO_BW_320: return ("320"); break; case RATE_INFO_BW_EHT_RU: IMPROVE("nl80211_eht_ru_alloc"); return ("EHT_RU"); break; default: return ("?"); break; } } static void lkpi_nl80211_sta_info_to_str(struct sbuf *s, const char *prefix, const uint64_t flags) { int bit, i; sbuf_printf(s, "%s %#010jx", prefix, flags); i = 0; for (bit = 0; bit < BITS_PER_TYPE(flags); bit++) { if ((flags & BIT_ULL(bit)) == 0) continue; #define EXPAND_CASE(_flag) \ case NL80211_STA_INFO_ ## _flag: \ sbuf_printf(s, "%c%s", (i == 0) ? '<' : ',', #_flag); \ i++; \ break; switch (bit) { EXPAND_CASE(BEACON_RX) EXPAND_CASE(BEACON_SIGNAL_AVG) EXPAND_CASE(BSS_PARAM) EXPAND_CASE(CHAIN_SIGNAL) EXPAND_CASE(CHAIN_SIGNAL_AVG) EXPAND_CASE(CONNECTED_TIME) EXPAND_CASE(INACTIVE_TIME) EXPAND_CASE(SIGNAL) EXPAND_CASE(SIGNAL_AVG) EXPAND_CASE(STA_FLAGS) EXPAND_CASE(RX_BITRATE) EXPAND_CASE(RX_PACKETS) EXPAND_CASE(RX_BYTES) EXPAND_CASE(RX_DROP_MISC) EXPAND_CASE(TX_BITRATE) EXPAND_CASE(TX_PACKETS) EXPAND_CASE(TX_BYTES) EXPAND_CASE(TX_BYTES64) EXPAND_CASE(RX_BYTES64) EXPAND_CASE(TX_FAILED) EXPAND_CASE(TX_RETRIES) EXPAND_CASE(RX_DURATION) EXPAND_CASE(TX_DURATION) EXPAND_CASE(ACK_SIGNAL) EXPAND_CASE(ACK_SIGNAL_AVG) default: sbuf_printf(s, "%c?%d", (i == 0) ? '<' : ',', bit); break; } } #undef EXPAND_CASE if (i > 0) sbuf_printf(s, ">"); sbuf_printf(s, "\n"); } static int lkpi_80211_dump_stas(SYSCTL_HANDLER_ARGS) { struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct ieee80211vap *vap; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct lkpi_sta *lsta; struct ieee80211_sta *sta; struct station_info sinfo; struct sbuf s; int error; if (req->newptr) return (EPERM); lvif = (struct lkpi_vif *)arg1; vif = LVIF_TO_VIF(lvif); vap = LVIF_TO_VAP(lvif); lhw = vap->iv_ic->ic_softc; hw = LHW_TO_HW(lhw); sbuf_new_for_sysctl(&s, NULL, 1024, req); wiphy_lock(hw->wiphy); list_for_each_entry(lsta, &lvif->lsta_list, lsta_list) { sta = LSTA_TO_STA(lsta); sbuf_putc(&s, '\n'); sbuf_printf(&s, "lsta %p sta %p added_to_drv %d\n", lsta, sta, lsta->added_to_drv); memset(&sinfo, 0, sizeof(sinfo)); error = lkpi_80211_mo_sta_statistics(hw, vif, sta, &sinfo); if (error == EEXIST) /* Not added to driver. */ continue; if (error == ENOTSUPP) { sbuf_printf(&s, " sta_statistics not supported\n"); continue; } if (error != 0) { sbuf_printf(&s, " sta_statistics failed: %d\n", error); continue; } /* If no RX_BITRATE is reported, try to fill it in from the lsta sinfo. 
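 * (sinfo.filled is a bitmask of NL80211_STA_INFO_* validity bits; the
 * test below checks BIT_ULL(NL80211_STA_INFO_RX_BITRATE) to decide
 * whether sinfo.rxrate already carries valid data.)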
*/ if ((sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) == 0 && (lsta->sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) != 0) { memcpy(&sinfo.rxrate, &lsta->sinfo.rxrate, sizeof(sinfo.rxrate)); sinfo.filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); } lkpi_nl80211_sta_info_to_str(&s, " nl80211_sta_info (valid fields)", sinfo.filled); sbuf_printf(&s, " connected_time %u inactive_time %u\n", sinfo.connected_time, sinfo.inactive_time); sbuf_printf(&s, " rx_bytes %ju rx_packets %u rx_dropped_misc %u\n", (uintmax_t)sinfo.rx_bytes, sinfo.rx_packets, sinfo.rx_dropped_misc); sbuf_printf(&s, " rx_duration %ju rx_beacon %u rx_beacon_signal_avg %d\n", (uintmax_t)sinfo.rx_duration, sinfo.rx_beacon, (int8_t)sinfo.rx_beacon_signal_avg); sbuf_printf(&s, " tx_bytes %ju tx_packets %u tx_failed %u\n", (uintmax_t)sinfo.tx_bytes, sinfo.tx_packets, sinfo.tx_failed); sbuf_printf(&s, " tx_duration %ju tx_retries %u\n", (uintmax_t)sinfo.tx_duration, sinfo.tx_retries); sbuf_printf(&s, " signal %d signal_avg %d ack_signal %d avg_ack_signal %d\n", sinfo.signal, sinfo.signal_avg, sinfo.ack_signal, sinfo.avg_ack_signal); sbuf_printf(&s, " generation %d assoc_req_ies_len %zu chains %d\n", sinfo.generation, sinfo.assoc_req_ies_len, sinfo.chains); for (int i = 0; i < sinfo.chains && i < IEEE80211_MAX_CHAINS; i++) { sbuf_printf(&s, " chain[%d] signal %d signal_avg %d\n", i, (int8_t)sinfo.chain_signal[i], (int8_t)sinfo.chain_signal_avg[i]); } /* assoc_req_ies, bss_param, sta_flags */ sbuf_printf(&s, " rxrate: flags %b bw %u(%s) legacy %u kbit/s mcs %u nss %u\n", sinfo.rxrate.flags, CFG80211_RATE_INFO_FLAGS_BITS, sinfo.rxrate.bw, lkpi_rate_info_bw_to_str(sinfo.rxrate.bw), sinfo.rxrate.legacy * 100, sinfo.rxrate.mcs, sinfo.rxrate.nss); sbuf_printf(&s, " he_dcm %u he_gi %u he_ru_alloc %u eht_gi %u\n", sinfo.rxrate.he_dcm, sinfo.rxrate.he_gi, sinfo.rxrate.he_ru_alloc, sinfo.rxrate.eht_gi); sbuf_printf(&s, " txrate: flags %b bw %u(%s) legacy %u kbit/s mcs %u nss %u\n", sinfo.txrate.flags, CFG80211_RATE_INFO_FLAGS_BITS, sinfo.txrate.bw, lkpi_rate_info_bw_to_str(sinfo.txrate.bw), sinfo.txrate.legacy * 100, sinfo.txrate.mcs, sinfo.txrate.nss); sbuf_printf(&s, " he_dcm %u he_gi %u he_ru_alloc %u eht_gi %u\n", sinfo.txrate.he_dcm, sinfo.txrate.he_gi, sinfo.txrate.he_ru_alloc, sinfo.txrate.eht_gi); } wiphy_unlock(hw->wiphy); sbuf_finish(&s); sbuf_delete(&s); return (0); } static enum ieee80211_sta_rx_bw lkpi_cw_to_rx_bw(enum nl80211_chan_width cw) { switch (cw) { case NL80211_CHAN_WIDTH_320: return (IEEE80211_STA_RX_BW_320); case NL80211_CHAN_WIDTH_160: case NL80211_CHAN_WIDTH_80P80: return (IEEE80211_STA_RX_BW_160); case NL80211_CHAN_WIDTH_80: return (IEEE80211_STA_RX_BW_80); case NL80211_CHAN_WIDTH_40: return (IEEE80211_STA_RX_BW_40); case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_20_NOHT: return (IEEE80211_STA_RX_BW_20); case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: /* Unsupported input. 
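 * (enum ieee80211_sta_rx_bw has no 5/10 MHz value, so fall back to the
 * narrowest representable width, 20 MHz.)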
*/ return (IEEE80211_STA_RX_BW_20); } } static enum nl80211_chan_width lkpi_rx_bw_to_cw(enum ieee80211_sta_rx_bw rx_bw) { switch (rx_bw) { case IEEE80211_STA_RX_BW_20: return (NL80211_CHAN_WIDTH_20); /* _NOHT */ case IEEE80211_STA_RX_BW_40: return (NL80211_CHAN_WIDTH_40); case IEEE80211_STA_RX_BW_80: return (NL80211_CHAN_WIDTH_80); case IEEE80211_STA_RX_BW_160: return (NL80211_CHAN_WIDTH_160); /* 80P80 */ case IEEE80211_STA_RX_BW_320: return (NL80211_CHAN_WIDTH_320); } } static void lkpi_sync_chanctx_cw_from_rx_bw(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct ieee80211_chanctx_conf *chanctx_conf; enum ieee80211_sta_rx_bw old_bw; uint32_t changed; chanctx_conf = rcu_dereference_protected(vif->bss_conf.chanctx_conf, lockdep_is_held(&hw->wiphy->mtx)); if (chanctx_conf == NULL) return; old_bw = lkpi_cw_to_rx_bw(chanctx_conf->def.width); if (old_bw == sta->deflink.bandwidth) return; chanctx_conf->def.width = lkpi_rx_bw_to_cw(sta->deflink.bandwidth); if (chanctx_conf->def.width == NL80211_CHAN_WIDTH_20 && !sta->deflink.ht_cap.ht_supported) chanctx_conf->def.width = NL80211_CHAN_WIDTH_20_NOHT; chanctx_conf->min_def = chanctx_conf->def; vif->bss_conf.chanreq.oper.width = chanctx_conf->def.width; changed = IEEE80211_CHANCTX_CHANGE_MIN_WIDTH; changed |= IEEE80211_CHANCTX_CHANGE_WIDTH; lkpi_80211_mo_change_chanctx(hw, chanctx_conf, changed); } #if defined(LKPI_80211_HT) static void lkpi_sta_sync_ht_from_ni(struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_node *ni) { struct ieee80211vap *vap; uint8_t *ie; struct ieee80211_ht_cap *htcap; int i, rx_nss; if ((ni->ni_flags & IEEE80211_NODE_HT) == 0) { sta->deflink.ht_cap.ht_supported = false; return; } sta->deflink.ht_cap.ht_supported = true; /* htcap->ampdu_params_info */ vap = ni->ni_vap; sta->deflink.ht_cap.ampdu_density = _IEEE80211_MASKSHIFT(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY); if (sta->deflink.ht_cap.ampdu_density > vap->iv_ampdu_density) sta->deflink.ht_cap.ampdu_density = vap->iv_ampdu_density; sta->deflink.ht_cap.ampdu_factor = _IEEE80211_MASKSHIFT(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU); if (sta->deflink.ht_cap.ampdu_factor > vap->iv_ampdu_rxmax) sta->deflink.ht_cap.ampdu_factor = vap->iv_ampdu_rxmax; ie = ni->ni_ies.htcap_ie; KASSERT(ie != NULL, ("%s: HT but no htcap_ie on ni %p\n", __func__, ni)); if (ie[0] == IEEE80211_ELEMID_VENDOR) ie += 4; ie += 2; htcap = (struct ieee80211_ht_cap *)ie; sta->deflink.ht_cap.cap = htcap->cap_info; sta->deflink.ht_cap.mcs = htcap->mcs; if ((sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) != 0 && IEEE80211_IS_CHAN_HT40(ni->ni_chan)) sta->deflink.bandwidth = IEEE80211_STA_RX_BW_40; else sta->deflink.bandwidth = IEEE80211_STA_RX_BW_20; /* * 802.11n-2009 20.6 Parameters for HT MCSs gives the mandatory/ * optional MCS for Nss=1..4. We need to check the first four * MCS sets from the Rx MCS Bitmask; then there is MCS 32 and * MCS33.. is UEQM. */ rx_nss = 0; for (i = 0; i < 4; i++) { if (htcap->mcs.rx_mask[i]) rx_nss++; } if (rx_nss > 0) sta->deflink.rx_nss = rx_nss; IMPROVE("sta->wme"); if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_MAX_AMSDU) sta->deflink.agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_7935; else sta->deflink.agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_3839; sta->deflink.agg.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA; #ifdef __handled_by_driver__ /* iwlwifi only? actually unused? 
*/ for (i = 0; i < nitems(sta->deflink.agg.max_tid_amsdu_len); i++) { sta->deflink.agg.max_tid_amsdu_len[i] = ; } #endif } #endif #if defined(LKPI_80211_VHT) static void lkpi_sta_sync_vht_from_ni(struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_node *ni) { uint32_t width; int rx_nss; uint16_t rx_mcs_map; uint8_t mcs; if ((ni->ni_flags & IEEE80211_NODE_VHT) == 0 || !IEEE80211_IS_CHAN_VHT_5GHZ(ni->ni_chan)) { sta->deflink.vht_cap.vht_supported = false; return; } sta->deflink.vht_cap.vht_supported = true; sta->deflink.vht_cap.cap = ni->ni_vhtcap; sta->deflink.vht_cap.vht_mcs = ni->ni_vht_mcsinfo; /* * If VHT20/40 are selected do not update the bandwidth * from HT but stay on VHT. */ if (ni->ni_vht_chanwidth == IEEE80211_VHT_CHANWIDTH_USE_HT) goto skip_bw; width = (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK); switch (width) { #if 0 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: sta->deflink.bandwidth = IEEE80211_STA_RX_BW_160; break; #endif default: /* Check if we do support 160 MHz somehow after all. */ #if 0 if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) != 0) sta->deflink.bandwidth = IEEE80211_STA_RX_BW_160; else #endif sta->deflink.bandwidth = IEEE80211_STA_RX_BW_80; } skip_bw: rx_nss = 0; rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map; for (int i = 7; i >= 0; i--) { mcs = rx_mcs_map >> (2 * i); mcs &= 0x3; if (mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) { rx_nss = i + 1; break; } } if (rx_nss > 0) sta->deflink.rx_nss = rx_nss; switch (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: sta->deflink.agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454; break; case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: sta->deflink.agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_7991; break; case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895: default: sta->deflink.agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_3895; break; } } #endif static void lkpi_sta_sync_from_ni(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_node *ni, bool updchnctx) { #if defined(LKPI_80211_HT) lkpi_sta_sync_ht_from_ni(vif, sta, ni); #endif #if defined(LKPI_80211_VHT) lkpi_sta_sync_vht_from_ni(vif, sta, ni); #endif /* * We are also called from node allocation which net80211 * can do even on `ifconfig down`; in that case the chanctx * may still be valid and we get a discrepancy between * sta and chanctx. Thus do not try to update the chanctx * when called from lkpi_lsta_alloc().
*/ if (updchnctx) lkpi_sync_chanctx_cw_from_rx_bw(hw, vif, sta); } static uint8_t lkpi_get_max_rx_chains(struct ieee80211_node *ni) { uint8_t chains; #if defined(LKPI_80211_HT) || defined(LKPI_80211_VHT) struct lkpi_sta *lsta; struct ieee80211_sta *sta; lsta = ni->ni_drv_data; sta = LSTA_TO_STA(lsta); #endif chains = 1; #if defined(LKPI_80211_HT) IMPROVE("We should factor counting MCS/NSS out for sync and here"); if (sta->deflink.ht_cap.ht_supported) chains = MAX(chains, sta->deflink.rx_nss); #endif #if defined(LKPI_80211_VHT) if (sta->deflink.vht_cap.vht_supported) chains = MAX(chains, sta->deflink.rx_nss); #endif return (chains); } static void lkpi_lsta_dump(struct lkpi_sta *lsta, struct ieee80211_node *ni, const char *_f, int _l) { #ifdef LINUXKPI_DEBUG_80211 if ((linuxkpi_debug_80211 & D80211_TRACE_STA) == 0) return; if (lsta == NULL) return; printf("%s:%d lsta %p ni %p sta %p\n", _f, _l, lsta, ni, &lsta->sta); if (ni != NULL) ieee80211_dump_node(NULL, ni); printf("\ttxq_task txq len %d mtx\n", mbufq_len(&lsta->txq)); printf("\tkc %p state %d added_to_drv %d in_mgd %d\n", &lsta->kc[0], lsta->state, lsta->added_to_drv, lsta->in_mgd); #endif } static void lkpi_lsta_remove(struct lkpi_sta *lsta, struct lkpi_vif *lvif) { lockdep_assert_wiphy(lsta->hw->wiphy); KASSERT(!list_empty(&lsta->lsta_list), ("%s: lsta %p ni %p\n", __func__, lsta, lsta->ni)); list_del_init(&lsta->lsta_list); } static struct lkpi_sta * lkpi_lsta_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN], struct ieee80211_hw *hw, struct ieee80211_node *ni) { struct lkpi_sta *lsta; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct ieee80211_sta *sta; int band, i, tid; lsta = malloc(sizeof(*lsta) + hw->sta_data_size, M_LKPI80211, M_NOWAIT | M_ZERO); if (lsta == NULL) return (NULL); lsta->hw = hw; lsta->added_to_drv = false; lsta->state = IEEE80211_STA_NOTEXIST; /* * Link the ni to the lsta here without taking a reference. * For one we would have to take the reference in node_init() * as ieee80211_alloc_node() will initialise the refcount after us. * For the other a ni and an lsta are 1:1 mapped and always together * from [ic_]node_alloc() to [ic_]node_free() so we are essentially * using the ni references for the lsta as well despite it being * two separate allocations. */ lsta->ni = ni; /* The back-pointer "drv_data" to net80211_node lets us get lsta. */ ni->ni_drv_data = lsta; lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); sta = LSTA_TO_STA(lsta); IEEE80211_ADDR_COPY(sta->addr, mac); /* TXQ */ for (tid = 0; tid < nitems(sta->txq); tid++) { struct lkpi_txq *ltxq; /* We are not limiting ourselves to hw.queues here. */ ltxq = malloc(sizeof(*ltxq) + hw->txq_data_size, M_LKPI80211, M_NOWAIT | M_ZERO); if (ltxq == NULL) goto cleanup; /* iwlwifi/mvm/sta.c::tid_to_mac80211_ac[] */ if (tid == IEEE80211_NUM_TIDS) { if (!ieee80211_hw_check(hw, STA_MMPDU_TXQ)) { free(ltxq, M_LKPI80211); continue; } IMPROVE("AP/if we support non-STA here too"); ltxq->txq.ac = IEEE80211_AC_VO; } else { ltxq->txq.ac = ieee80211e_up_to_ac[tid & 7]; } ltxq->seen_dequeue = false; ltxq->stopped = false; ltxq->txq.vif = vif; ltxq->txq.tid = tid; ltxq->txq.sta = sta; TAILQ_ELEM_INIT(ltxq, txq_entry); skb_queue_head_init(&ltxq->skbq); LKPI_80211_LTXQ_LOCK_INIT(ltxq); sta->txq[tid] = &ltxq->txq; } /* Deflink information.
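 * (The bitrate values matched below follow the mac80211 convention of
 * 100 kbit/s units, e.g. 240 is the 24 Mbit/s OFDM rate and 55 the
 * 5.5 Mbit/s CCK rate.)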
	 */
	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		struct ieee80211_supported_band *supband;

		supband = hw->wiphy->bands[band];
		if (supband == NULL)
			continue;

		for (i = 0; i < supband->n_bitrates; i++) {
			switch (band) {
			case NL80211_BAND_2GHZ:
				switch (supband->bitrates[i].bitrate) {
				case 240:	/* 11g only */
				case 120:	/* 11g only */
				case 110:
				case 60:	/* 11g only */
				case 55:
				case 20:
				case 10:
					sta->deflink.supp_rates[band] |= BIT(i);
					break;
				}
				break;
			case NL80211_BAND_5GHZ:
				switch (supband->bitrates[i].bitrate) {
				case 240:
				case 120:
				case 60:
					sta->deflink.supp_rates[band] |= BIT(i);
					break;
				}
				break;
			}
		}
	}

	sta->deflink.smps_mode = IEEE80211_SMPS_OFF;
	sta->deflink.bandwidth = IEEE80211_STA_RX_BW_20;
	sta->deflink.rx_nss = 1;

	lkpi_sta_sync_from_ni(hw, vif, sta, ni, false);

	IMPROVE("he, eht, bw_320, ... smps_mode, ..");

	/* Link configuration. */
	IEEE80211_ADDR_COPY(sta->deflink.addr, sta->addr);
	sta->link[0] = &sta->deflink;
	for (i = 1; i < nitems(sta->link); i++) {
		IMPROVE("more links; only link[0] = deflink currently.");
	}
	IMPROVE("11be");
	sta->mlo = false;

	/* Deferred TX path. */
	LKPI_80211_LSTA_TXQ_LOCK_INIT(lsta);
	TASK_INIT(&lsta->txq_task, 0, lkpi_80211_txq_task, lsta);
	mbufq_init(&lsta->txq, 32 * NAPI_POLL_WEIGHT);
	lsta->txq_ready = true;

	return (lsta);

cleanup:
	for (; tid >= 0; tid--) {
		struct lkpi_txq *ltxq;

		/* May be NULL: failed allocation or skipped MMPDU TXQ slot. */
		if (sta->txq[tid] == NULL)
			continue;
		ltxq = TXQ_TO_LTXQ(sta->txq[tid]);
		LKPI_80211_LTXQ_LOCK_DESTROY(ltxq);
		free(sta->txq[tid], M_LKPI80211);
	}
	free(lsta, M_LKPI80211);
	return (NULL);
}

static void
lkpi_lsta_free(struct lkpi_sta *lsta, struct ieee80211_node *ni)
{
	struct mbuf *m;

	if (lsta->added_to_drv)
		panic("%s: Trying to free an lsta still known to firmware: "
		    "lsta %p ni %p added_to_drv %d\n",
		    __func__, lsta, ni, lsta->added_to_drv);

	/* XXX-BZ free resources, ... */
	IMPROVE();

	/* Drain sta->txq[] */
	LKPI_80211_LSTA_TXQ_LOCK(lsta);
	lsta->txq_ready = false;
	LKPI_80211_LSTA_TXQ_UNLOCK(lsta);

	/* Drain taskq, won't be restarted until added_to_drv is set again. */
	while (taskqueue_cancel(taskqueue_thread, &lsta->txq_task, NULL) != 0)
		taskqueue_drain(taskqueue_thread, &lsta->txq_task);

	/* Flush mbufq (make sure to release ni refs!). */
	m = mbufq_dequeue(&lsta->txq);
	while (m != NULL) {
		struct ieee80211_node *nim;

		nim = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (nim != NULL)
			ieee80211_free_node(nim);
		m_freem(m);
		m = mbufq_dequeue(&lsta->txq);
	}
	KASSERT(mbufq_empty(&lsta->txq), ("%s: lsta %p has txq len %d != 0\n",
	    __func__, lsta, mbufq_len(&lsta->txq)));
	LKPI_80211_LSTA_TXQ_LOCK_DESTROY(lsta);

	/* Removing lsta from the vif is done by the state machine.  Should we assert it? */
	IMPROVE("Make sure everything is cleaned up.");

	/* Free lsta. */
	lsta->ni = NULL;
	ni->ni_drv_data = NULL;
	free(lsta, M_LKPI80211);
}

static enum nl80211_band
lkpi_net80211_chan_to_nl80211_band(struct ieee80211_channel *c)
{

	if (IEEE80211_IS_CHAN_2GHZ(c))
		return (NL80211_BAND_2GHZ);
	else if (IEEE80211_IS_CHAN_5GHZ(c))
		return (NL80211_BAND_5GHZ);
#ifdef __notyet__
	else if ()
		return (NL80211_BAND_6GHZ);
	else if ()
		return (NL80211_BAND_60GHZ);
	else if (IEEE80211_IS_CHAN_GSM(c))
		return (NL80211_BAND_XXX);
#endif
	else
		panic("%s: unsupported band. c %p flags %#x\n",
		    __func__, c, c->ic_flags);
}

static uint32_t
lkpi_nl80211_band_to_net80211_band(enum nl80211_band band)
{

	/* XXX-BZ this is just silly; net80211 is too convoluted. */
	/* IEEE80211_CHAN_A / _G / .. doesn't really work either.
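	 * A worked example: NL80211_BAND_2GHZ maps to the IEEE80211_CHAN_2GHZ
	 * flag below; 6 GHz and 60 GHz have no usable equivalent yet and fall
	 * through to return 0x00.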
*/ switch (band) { case NL80211_BAND_2GHZ: return (IEEE80211_CHAN_2GHZ); break; case NL80211_BAND_5GHZ: return (IEEE80211_CHAN_5GHZ); break; case NL80211_BAND_60GHZ: break; case NL80211_BAND_6GHZ: break; default: panic("%s: unsupported band %u\n", __func__, band); break; } IMPROVE(); return (0x00); } #if 0 static enum ieee80211_ac_numbers lkpi_ac_net_to_l80211(int ac) { switch (ac) { case WME_AC_VO: return (IEEE80211_AC_VO); case WME_AC_VI: return (IEEE80211_AC_VI); case WME_AC_BE: return (IEEE80211_AC_BE); case WME_AC_BK: return (IEEE80211_AC_BK); default: printf("%s: invalid WME_AC_* input: ac = %d\n", __func__, ac); return (IEEE80211_AC_BE); } } #endif static enum nl80211_iftype lkpi_opmode_to_vif_type(enum ieee80211_opmode opmode) { switch (opmode) { case IEEE80211_M_IBSS: return (NL80211_IFTYPE_ADHOC); break; case IEEE80211_M_STA: return (NL80211_IFTYPE_STATION); break; case IEEE80211_M_WDS: return (NL80211_IFTYPE_WDS); break; case IEEE80211_M_HOSTAP: return (NL80211_IFTYPE_AP); break; case IEEE80211_M_MONITOR: return (NL80211_IFTYPE_MONITOR); break; case IEEE80211_M_MBSS: return (NL80211_IFTYPE_MESH_POINT); break; case IEEE80211_M_AHDEMO: /* FALLTHROUGH */ default: printf("ERROR: %s: unsupported opmode %d\n", __func__, opmode); /* FALLTHROUGH */ } return (NL80211_IFTYPE_UNSPECIFIED); } #ifdef LKPI_80211_HW_CRYPTO static const char * lkpi_cipher_suite_to_name(uint32_t wlan_cipher_suite) { switch (wlan_cipher_suite) { case WLAN_CIPHER_SUITE_WEP40: return ("WEP40"); case WLAN_CIPHER_SUITE_WEP104: return ("WEP104"); case WLAN_CIPHER_SUITE_TKIP: return ("TKIP"); case WLAN_CIPHER_SUITE_CCMP: return ("CCMP"); case WLAN_CIPHER_SUITE_CCMP_256: return ("CCMP_256"); case WLAN_CIPHER_SUITE_GCMP: return ("GCMP"); case WLAN_CIPHER_SUITE_GCMP_256: return ("GCMP_256"); case WLAN_CIPHER_SUITE_AES_CMAC: return ("AES_CMAC"); case WLAN_CIPHER_SUITE_BIP_CMAC_256: return ("BIP_CMAC_256"); case WLAN_CIPHER_SUITE_BIP_GMAC_128: return ("BIP_GMAC_128"); case WLAN_CIPHER_SUITE_BIP_GMAC_256: return ("BIP_GMAC_256"); default: return ("??"); } } static uint32_t lkpi_l80211_to_net80211_cyphers(struct ieee80211com *ic, uint32_t wlan_cipher_suite) { switch (wlan_cipher_suite) { case WLAN_CIPHER_SUITE_WEP40: return (IEEE80211_CRYPTO_WEP); case WLAN_CIPHER_SUITE_WEP104: return (IEEE80211_CRYPTO_WEP); case WLAN_CIPHER_SUITE_TKIP: return (IEEE80211_CRYPTO_TKIP); case WLAN_CIPHER_SUITE_CCMP: return (IEEE80211_CRYPTO_AES_CCM); case WLAN_CIPHER_SUITE_CCMP_256: return (IEEE80211_CRYPTO_AES_CCM_256); case WLAN_CIPHER_SUITE_GCMP: return (IEEE80211_CRYPTO_AES_GCM_128); case WLAN_CIPHER_SUITE_GCMP_256: return (IEEE80211_CRYPTO_AES_GCM_256); case WLAN_CIPHER_SUITE_AES_CMAC: return (IEEE80211_CRYPTO_BIP_CMAC_128); case WLAN_CIPHER_SUITE_BIP_CMAC_256: return (IEEE80211_CRYPTO_BIP_CMAC_256); case WLAN_CIPHER_SUITE_BIP_GMAC_128: return (IEEE80211_CRYPTO_BIP_GMAC_128); case WLAN_CIPHER_SUITE_BIP_GMAC_256: return (IEEE80211_CRYPTO_BIP_GMAC_256); default: ic_printf(ic, "%s: unknown WLAN Cipher Suite %#08x | %u (%s)\n", __func__, wlan_cipher_suite >> 8, wlan_cipher_suite & 0xff, lkpi_cipher_suite_to_name(wlan_cipher_suite)); return (0); } } static uint32_t lkpi_net80211_to_l80211_cipher_suite(uint32_t cipher, uint8_t keylen) { switch (cipher) { case IEEE80211_CIPHER_WEP: if (keylen < 8) return (WLAN_CIPHER_SUITE_WEP40); else return (WLAN_CIPHER_SUITE_WEP104); break; case IEEE80211_CIPHER_TKIP: return (WLAN_CIPHER_SUITE_TKIP); case IEEE80211_CIPHER_AES_CCM: return (WLAN_CIPHER_SUITE_CCMP); case IEEE80211_CIPHER_AES_CCM_256: return 
(WLAN_CIPHER_SUITE_CCMP_256); case IEEE80211_CIPHER_AES_GCM_128: return (WLAN_CIPHER_SUITE_GCMP); case IEEE80211_CIPHER_AES_GCM_256: return (WLAN_CIPHER_SUITE_GCMP_256); case IEEE80211_CIPHER_BIP_CMAC_128: return (WLAN_CIPHER_SUITE_AES_CMAC); case IEEE80211_CIPHER_BIP_CMAC_256: return (WLAN_CIPHER_SUITE_BIP_CMAC_256); case IEEE80211_CIPHER_BIP_GMAC_128: return (WLAN_CIPHER_SUITE_BIP_GMAC_128); case IEEE80211_CIPHER_BIP_GMAC_256: return (WLAN_CIPHER_SUITE_BIP_GMAC_256); case IEEE80211_CIPHER_AES_OCB: case IEEE80211_CIPHER_TKIPMIC: /* * TKIP w/ hw MIC support * (gone wrong; should really be a crypto flag in net80211). */ case IEEE80211_CIPHER_CKIP: case IEEE80211_CIPHER_NONE: printf("%s: unsupported cipher %#010x\n", __func__, cipher); break; default: printf("%s: unknown cipher %#010x\n", __func__, cipher); }; return (0); } #endif #ifdef __notyet__ static enum ieee80211_sta_state lkpi_net80211_state_to_sta_state(enum ieee80211_state state) { /* * XXX-BZ The net80211 states are "try to ..", the lkpi8011 states are * "done". Also ASSOC/AUTHORIZED are both "RUN" then? */ switch (state) { case IEEE80211_S_INIT: return (IEEE80211_STA_NOTEXIST); case IEEE80211_S_SCAN: return (IEEE80211_STA_NONE); case IEEE80211_S_AUTH: return (IEEE80211_STA_AUTH); case IEEE80211_S_ASSOC: return (IEEE80211_STA_ASSOC); case IEEE80211_S_RUN: return (IEEE80211_STA_AUTHORIZED); case IEEE80211_S_CAC: case IEEE80211_S_CSA: case IEEE80211_S_SLEEP: default: UNIMPLEMENTED; }; return (IEEE80211_STA_NOTEXIST); } #endif static struct linuxkpi_ieee80211_channel * lkpi_find_lkpi80211_chan(struct lkpi_hw *lhw, struct ieee80211_channel *c) { struct ieee80211_hw *hw; struct linuxkpi_ieee80211_channel *channels; enum nl80211_band band; int i, nchans; hw = LHW_TO_HW(lhw); band = lkpi_net80211_chan_to_nl80211_band(c); if (hw->wiphy->bands[band] == NULL) return (NULL); nchans = hw->wiphy->bands[band]->n_channels; if (nchans <= 0) return (NULL); channels = hw->wiphy->bands[band]->channels; for (i = 0; i < nchans; i++) { if (channels[i].hw_value == c->ic_ieee) return (&channels[i]); } return (NULL); } #if 0 static struct linuxkpi_ieee80211_channel * lkpi_get_lkpi80211_chan(struct ieee80211com *ic, struct ieee80211_node *ni) { struct linuxkpi_ieee80211_channel *chan; struct ieee80211_channel *c; struct lkpi_hw *lhw; chan = NULL; if (ni != NULL && ni->ni_chan != IEEE80211_CHAN_ANYC) c = ni->ni_chan; else if (ic->ic_bsschan != IEEE80211_CHAN_ANYC) c = ic->ic_bsschan; else if (ic->ic_curchan != IEEE80211_CHAN_ANYC) c = ic->ic_curchan; else c = NULL; if (c != NULL && c != IEEE80211_CHAN_ANYC) { lhw = ic->ic_softc; chan = lkpi_find_lkpi80211_chan(lhw, c); } return (chan); } #endif struct linuxkpi_ieee80211_channel * linuxkpi_ieee80211_get_channel(struct wiphy *wiphy, uint32_t freq) { enum nl80211_band band; for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *supband; struct linuxkpi_ieee80211_channel *channels; int i; supband = wiphy->bands[band]; if (supband == NULL || supband->n_channels == 0) continue; channels = supband->channels; for (i = 0; i < supband->n_channels; i++) { if (channels[i].center_freq == freq) return (&channels[i]); } } return (NULL); } #ifdef LKPI_80211_HW_CRYPTO static int lkpi_sta_del_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct lkpi_sta *lsta) { int error; if (!lkpi_hwcrypto) return (0); lockdep_assert_wiphy(hw->wiphy); ieee80211_ref_node(lsta->ni); error = 0; for (ieee80211_keyix keyix = 0; keyix < nitems(lsta->kc); keyix++) { struct ieee80211_key_conf *kc; int err; 
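		/* A NULL slot was never pushed to the driver (or is already gone). */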
if (lsta->kc[keyix] == NULL) continue; kc = lsta->kc[keyix]; #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO) ic_printf(lsta->ni->ni_ic, "%s: running set_key cmd %d(%s) for " "sta %6D: keyidx %u hw_key_idx %u flags %b\n", __func__, DISABLE_KEY, "DISABLE", lsta->sta.addr, ":", kc->keyidx, kc->hw_key_idx, kc->flags, IEEE80211_KEY_FLAG_BITS); #endif err = lkpi_80211_mo_set_key(hw, DISABLE_KEY, vif, LSTA_TO_STA(lsta), kc); if (err != 0) { ic_printf(lsta->ni->ni_ic, "%s: set_key cmd %d(%s) for " "sta %6D failed: %d\n", __func__, DISABLE_KEY, "DISABLE", lsta->sta.addr, ":", err); error++; /* * If we free the key here we will never be able to get it * removed from the driver/fw which will likely make us * crash (firmware). */ continue; } #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO) ic_printf(lsta->ni->ni_ic, "%s: set_key cmd %d(%s) for " "sta %6D succeeded: keyidx %u hw_key_idx %u flags %b\n", __func__, DISABLE_KEY, "DISABLE", lsta->sta.addr, ":", kc->keyidx, kc->hw_key_idx, kc->flags, IEEE80211_KEY_FLAG_BITS); #endif lsta->kc[keyix] = NULL; free(kc, M_LKPI80211); } ieee80211_free_node(lsta->ni); return (error); } /* XXX-BZ one day we should replace this iterating over VIFs, or node list? */ /* See also lkpi_sta_del_keys() these days. */ static int lkpi_iv_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct ieee80211com *ic; struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct lkpi_vif *lvif; struct lkpi_sta *lsta; struct ieee80211_vif *vif; struct ieee80211_sta *sta; struct ieee80211_node *ni; struct ieee80211_key_conf *kc; int error; ic = vap->iv_ic; if (IEEE80211_KEY_UNDEFINED(k)) { ic_printf(ic, "%s: vap %p key %p is undefined: %p %u\n", __func__, vap, k, k->wk_cipher, k->wk_keyix); return (0); } if (vap->iv_bss == NULL) { ic_printf(ic, "%s: iv_bss %p for vap %p is NULL\n", __func__, vap->iv_bss, vap); return (0); } ni = ieee80211_ref_node(vap->iv_bss); lsta = ni->ni_drv_data; if (lsta == NULL) { ic_printf(ic, "%s: ni %p (%6D) with lsta NULL\n", __func__, ni, ni->ni_bssid, ":"); ieee80211_free_node(ni); return (0); } sta = LSTA_TO_STA(lsta); if (lsta->kc[k->wk_keyix] == NULL) { #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO) ic_printf(ic, "%s: sta %6D and no key information, " "keyidx %u wk_macaddr %6D; returning success\n", __func__, sta->addr, ":", k->wk_keyix, k->wk_macaddr, ":"); #endif ieee80211_free_node(ni); return (1); } kc = lsta->kc[k->wk_keyix]; /* Re-check under lock. 
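	 * The slot may have been cleared while we did not hold the wiphy lock
	 * (e.g., by lkpi_sta_del_keys() during a state change).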
*/ if (kc == NULL) { #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO) ic_printf(ic, "%s: sta %6D and key information vanished, " "returning success\n", __func__, sta->addr, ":"); #endif error = 1; goto out; } #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO) ic_printf(ic, "%s: running set_key cmd %d(%s) for sta %6D: " "keyidx %u hw_key_idx %u flags %b\n", __func__, DISABLE_KEY, "DISABLE", sta->addr, ":", kc->keyidx, kc->hw_key_idx, kc->flags, IEEE80211_KEY_FLAG_BITS); #endif lhw = ic->ic_softc; hw = LHW_TO_HW(lhw); lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); error = lkpi_80211_mo_set_key(hw, DISABLE_KEY, vif, sta, kc); if (error != 0) { ic_printf(ic, "%s: set_key cmd %d(%s) for sta %6D failed: %d\n", __func__, DISABLE_KEY, "DISABLE", sta->addr, ":", error); error = 0; goto out; } #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO) ic_printf(ic, "%s: set_key cmd %d(%s) for sta %6D succeeded: " "keyidx %u hw_key_idx %u flags %b\n", __func__, DISABLE_KEY, "DISABLE", sta->addr, ":", kc->keyidx, kc->hw_key_idx, kc->flags, IEEE80211_KEY_FLAG_BITS); #endif lsta->kc[k->wk_keyix] = NULL; free(kc, M_LKPI80211); error = 1; out: ieee80211_free_node(ni); return (error); } static int lkpi_iv_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct ieee80211com *ic; struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct lkpi_vif *lvif; struct lkpi_sta *lsta; struct ieee80211_vif *vif; struct ieee80211_sta *sta; struct ieee80211_node *ni; struct ieee80211_key_conf *kc; uint32_t lcipher; uint16_t exp_flags; uint8_t keylen; int error; ic = vap->iv_ic; if (IEEE80211_KEY_UNDEFINED(k)) { ic_printf(ic, "%s: vap %p key %p is undefined: %p %u\n", __func__, vap, k, k->wk_cipher, k->wk_keyix); return (0); } if (vap->iv_bss == NULL) { ic_printf(ic, "%s: iv_bss %p for vap %p is NULL\n", __func__, vap->iv_bss, vap); return (0); } ni = ieee80211_ref_node(vap->iv_bss); lsta = ni->ni_drv_data; if (lsta == NULL) { ic_printf(ic, "%s: ni %p (%6D) with lsta NULL\n", __func__, ni, ni->ni_bssid, ":"); ieee80211_free_node(ni); return (0); } sta = LSTA_TO_STA(lsta); keylen = k->wk_keylen; lcipher = lkpi_net80211_to_l80211_cipher_suite( k->wk_cipher->ic_cipher, k->wk_keylen); switch (lcipher) { case WLAN_CIPHER_SUITE_CCMP: break; case WLAN_CIPHER_SUITE_TKIP: keylen += 2 * k->wk_cipher->ic_miclen; break; default: ic_printf(ic, "%s: CIPHER SUITE %#x (%s) not supported\n", __func__, lcipher, lkpi_cipher_suite_to_name(lcipher)); IMPROVE(); ieee80211_free_node(ni); return (0); } if (lsta->kc[k->wk_keyix] != NULL) { IMPROVE("Still in firmware? Del first. Can we assert this cannot happen?"); ic_printf(ic, "%s: sta %6D found with key information\n", __func__, sta->addr, ":"); kc = lsta->kc[k->wk_keyix]; lsta->kc[k->wk_keyix] = NULL; free(kc, M_LKPI80211); kc = NULL; /* safeguard */ } kc = malloc(sizeof(*kc) + keylen, M_LKPI80211, M_WAITOK | M_ZERO); kc->_k = k; /* Save the pointer to net80211. 
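	 * It allows mapping a key_conf handed to us by the driver back to
	 * the net80211 key.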
	 */
	kc->cipher = lcipher;
	kc->keyidx = k->wk_keyix;
#if 0
	kc->hw_key_idx = /* set by hw and needs to be passed for TX */;
#endif
	atomic64_set(&kc->tx_pn, k->wk_keytsc);
	kc->keylen = k->wk_keylen;
	memcpy(kc->key, k->wk_key, k->wk_keylen);

	if (k->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))
		kc->flags |= IEEE80211_KEY_FLAG_PAIRWISE;
	if (k->wk_flags & IEEE80211_KEY_GROUP)
		kc->flags &= ~IEEE80211_KEY_FLAG_PAIRWISE;

	switch (kc->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		kc->iv_len = k->wk_cipher->ic_header;
		kc->icv_len = k->wk_cipher->ic_trailer;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		memcpy(kc->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, k->wk_txmic,
		    k->wk_cipher->ic_miclen);
		memcpy(kc->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, k->wk_rxmic,
		    k->wk_cipher->ic_miclen);
		kc->iv_len = k->wk_cipher->ic_header;
		kc->icv_len = k->wk_cipher->ic_trailer;
		break;
	default:
		/* currently UNREACH */
		IMPROVE();
		break;
	}
	lsta->kc[k->wk_keyix] = kc;

#ifdef LINUXKPI_DEBUG_80211
	if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
		ic_printf(ic, "%s: running set_key cmd %d(%s) for sta %6D: "
		    "kc %p keyidx %u hw_key_idx %u keylen %u flags %b\n",
		    __func__, SET_KEY, "SET", sta->addr, ":",
		    kc, kc->keyidx, kc->hw_key_idx, kc->keylen, kc->flags,
		    IEEE80211_KEY_FLAG_BITS);
#endif

	lhw = ic->ic_softc;
	hw = LHW_TO_HW(lhw);
	lvif = VAP_TO_LVIF(vap);
	vif = LVIF_TO_VIF(lvif);
	error = lkpi_80211_mo_set_key(hw, SET_KEY, vif, sta, kc);
	if (error != 0) {
		ic_printf(ic, "%s: set_key cmd %d(%s) for sta %6D failed: %d\n",
		    __func__, SET_KEY, "SET", sta->addr, ":", error);
		lsta->kc[k->wk_keyix] = NULL;
		free(kc, M_LKPI80211);
		ieee80211_free_node(ni);
		return (0);
	}

#ifdef LINUXKPI_DEBUG_80211
	if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
		ic_printf(ic, "%s: set_key cmd %d(%s) for sta %6D succeeded: "
		    "kc %p keyidx %u hw_key_idx %u flags %b\n",
		    __func__, SET_KEY, "SET", sta->addr, ":",
		    kc, kc->keyidx, kc->hw_key_idx, kc->flags,
		    IEEE80211_KEY_FLAG_BITS);
#endif

	exp_flags = 0;
	switch (kc->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		exp_flags = (IEEE80211_KEY_FLAG_PAIRWISE |
		    IEEE80211_KEY_FLAG_PUT_IV_SPACE |
		    IEEE80211_KEY_FLAG_GENERATE_MMIC |
		    IEEE80211_KEY_FLAG_PUT_MIC_SPACE);
#define	TKIP_INVAL_COMBINATION						\
    (IEEE80211_KEY_FLAG_PUT_MIC_SPACE|IEEE80211_KEY_FLAG_GENERATE_MMIC)
		if ((kc->flags & TKIP_INVAL_COMBINATION) == TKIP_INVAL_COMBINATION) {
			ic_printf(ic, "%s: SET_KEY for %s returned invalid "
			    "combination %b\n", __func__,
			    lkpi_cipher_suite_to_name(kc->cipher),
			    kc->flags, IEEE80211_KEY_FLAG_BITS);
		}
#undef	TKIP_INVAL_COMBINATION
#ifdef __notyet__
		/* Do flags surgery; see the special handling in linuxkpi_ieee80211_ifattach(). */
		if ((kc->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) != 0) {
			k->wk_flags &= ~(IEEE80211_KEY_NOMICMGT|IEEE80211_KEY_NOMIC);
			k->wk_flags |= IEEE80211_KEY_SWMIC;
			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
		}
#endif
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		exp_flags = (IEEE80211_KEY_FLAG_PAIRWISE |
		    IEEE80211_KEY_FLAG_PUT_IV_SPACE |
		    IEEE80211_KEY_FLAG_GENERATE_IV |
		    IEEE80211_KEY_FLAG_GENERATE_IV_MGMT |	/* Only needs IV generation for MGMT frames. */
		    IEEE80211_KEY_FLAG_SW_MGMT_TX);		/* MFP in software */
		break;
	}
	if ((kc->flags & ~exp_flags) != 0)
		ic_printf(ic, "%s: SET_KEY for %s returned unexpected key flags: "
		    " %#06x & ~%#06x = %b\n", __func__,
		    lkpi_cipher_suite_to_name(kc->cipher),
		    kc->flags, exp_flags, (kc->flags & ~exp_flags),
		    IEEE80211_KEY_FLAG_BITS);

#ifdef __notyet__
	/* Do flags surgery.
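	 * If the driver does not generate IVs/MICs itself, net80211 has to;
	 * mirror the returned mac80211 key flags onto the net80211 wk_flags.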
	 */
	if ((kc->flags & IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) == 0)
		k->wk_flags |= IEEE80211_KEY_NOIVMGT;
	if ((kc->flags & IEEE80211_KEY_FLAG_GENERATE_IV) == 0)
		k->wk_flags |= IEEE80211_KEY_NOIV;
#endif

	ieee80211_free_node(ni);
	return (1);
}

static void
lkpi_iv_key_update_begin(struct ieee80211vap *vap)
{
	struct ieee80211_node_table *nt;
	struct ieee80211com *ic;
	struct lkpi_hw *lhw;
	struct ieee80211_hw *hw;
	struct lkpi_vif *lvif;
	struct ieee80211_node *ni;
	bool icislocked, ntislocked;

	ic = vap->iv_ic;
	lhw = ic->ic_softc;
	hw = LHW_TO_HW(lhw);
	lvif = VAP_TO_LVIF(vap);
	nt = &ic->ic_sta;

	icislocked = IEEE80211_IS_LOCKED(ic);
	ntislocked = IEEE80211_NODE_IS_LOCKED(nt);

#ifdef LINUXKPI_DEBUG_80211
	if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
		ic_printf(ic, "%s: tid %d vap %p ic %p %slocked nt %p %slocked "
		    "lvif ic_unlocked %d nt_unlocked %d\n", __func__,
		    curthread->td_tid, vap, ic, icislocked ? "" : "un",
		    nt, ntislocked ? "" : "un", lvif->ic_unlocked,
		    lvif->nt_unlocked);
#endif

	/*
	 * This is inconsistent net80211 locking to be fixed one day.
	 */
	/* Try to make sure the node does not go away while possibly unlocked. */
	ni = NULL;
	if (icislocked || ntislocked) {
		if (vap->iv_bss != NULL)
			ni = ieee80211_ref_node(vap->iv_bss);
	}

	if (icislocked)
		IEEE80211_UNLOCK(ic);
	if (ntislocked)
		IEEE80211_NODE_UNLOCK(nt);

	wiphy_lock(hw->wiphy);

	KASSERT(lvif->key_update_iv_bss == NULL, ("%s: key_update_iv_bss not "
	    "NULL %p", __func__, lvif->key_update_iv_bss));
	lvif->key_update_iv_bss = ni;

	/*
	 * ic/nt_unlocked could be a bool given we are under the lock and there
	 * must only be a single thread.
	 * In case anything in the future disturbs the order, the refcnt will
	 * help us catch problems a lot more easily.
	 */
	if (icislocked)
		refcount_acquire(&lvif->ic_unlocked);
	if (ntislocked)
		refcount_acquire(&lvif->nt_unlocked);
}

static void
lkpi_iv_key_update_end(struct ieee80211vap *vap)
{
	struct ieee80211_node_table *nt;
	struct ieee80211com *ic;
	struct lkpi_hw *lhw;
	struct ieee80211_hw *hw;
	struct lkpi_vif *lvif;
	bool icislocked, ntislocked;

	ic = vap->iv_ic;
	lhw = ic->ic_softc;
	hw = LHW_TO_HW(lhw);
	lvif = VAP_TO_LVIF(vap);
	nt = &ic->ic_sta;

	icislocked = IEEE80211_IS_LOCKED(ic);
	MPASS(!icislocked);
	ntislocked = IEEE80211_NODE_IS_LOCKED(nt);
	MPASS(!ntislocked);

#ifdef LINUXKPI_DEBUG_80211
	if (linuxkpi_debug_80211 & D80211_TRACE_HW_CRYPTO)
		ic_printf(ic, "%s: tid %d vap %p ic %p %slocked nt %p %slocked "
		    "lvif ic_unlocked %d nt_unlocked %d\n", __func__,
		    curthread->td_tid, vap, ic, icislocked ? "" : "un",
		    nt, ntislocked ? "" : "un", lvif->ic_unlocked,
		    lvif->nt_unlocked);
#endif

	/*
	 * Check under lock; see comment in lkpi_iv_key_update_begin().
	 * In case the refcnt gets out of sync, locking in net80211 will
	 * quickly barf as well (trying to unlock a lock not held).
	 */
	icislocked = refcount_release_if_last(&lvif->ic_unlocked);
	ntislocked = refcount_release_if_last(&lvif->nt_unlocked);

	if (lvif->key_update_iv_bss != NULL) {
		ieee80211_free_node(lvif->key_update_iv_bss);
		lvif->key_update_iv_bss = NULL;
	}

	wiphy_unlock(hw->wiphy);

	/*
	 * This is inconsistent net80211 locking to be fixed one day.
	 * ic before nt to avoid a LOR.
	 */
	if (icislocked)
		IEEE80211_LOCK(ic);
	if (ntislocked)
		IEEE80211_NODE_LOCK(nt);
}
#endif

static u_int
lkpi_ic_update_mcast_copy(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct netdev_hw_addr_list *mc_list;
	struct netdev_hw_addr *addr;

	KASSERT(arg != NULL && sdl != NULL, ("%s: arg %p sdl %p cnt %u\n",
	    __func__, arg, sdl, cnt));

	mc_list = arg;
	/* If it is on the list already skip it.
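	 * (The same multicast address may be enrolled on more than one vap.)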
*/ netdev_hw_addr_list_for_each(addr, mc_list) { if (!memcmp(addr->addr, LLADDR(sdl), sdl->sdl_alen)) return (0); } addr = malloc(sizeof(*addr), M_LKPI80211, M_NOWAIT | M_ZERO); if (addr == NULL) return (0); INIT_LIST_HEAD(&addr->addr_list); memcpy(addr->addr, LLADDR(sdl), sdl->sdl_alen); /* XXX this should be a netdev function? */ list_add(&addr->addr_list, &mc_list->addr_list); mc_list->count++; #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE) printf("%s:%d: mc_list count %d: added %6D\n", __func__, __LINE__, mc_list->count, addr->addr, ":"); #endif return (1); } static void lkpi_update_mcast_filter(struct ieee80211com *ic, bool force) { struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct netdev_hw_addr_list mc_list; struct list_head *le, *next; struct netdev_hw_addr *addr; struct ieee80211vap *vap; u64 mc; unsigned int changed_flags, total_flags; lhw = ic->ic_softc; if (lhw->ops->prepare_multicast == NULL || lhw->ops->configure_filter == NULL) return; if (!lhw->update_mc && !force) return; changed_flags = total_flags = 0; mc_list.count = 0; INIT_LIST_HEAD(&mc_list.addr_list); if (ic->ic_allmulti == 0) { TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if_foreach_llmaddr(vap->iv_ifp, lkpi_ic_update_mcast_copy, &mc_list); } else { changed_flags |= FIF_ALLMULTI; } hw = LHW_TO_HW(lhw); mc = lkpi_80211_mo_prepare_multicast(hw, &mc_list); /* * XXX-BZ make sure to get this sorted what is a change, * what gets all set; what was already set? */ total_flags = changed_flags; lkpi_80211_mo_configure_filter(hw, changed_flags, &total_flags, mc); #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE) printf("%s: changed_flags %#06x count %d total_flags %#010x\n", __func__, changed_flags, mc_list.count, total_flags); #endif if (mc_list.count != 0) { list_for_each_safe(le, next, &mc_list.addr_list) { addr = list_entry(le, struct netdev_hw_addr, addr_list); free(addr, M_LKPI80211); mc_list.count--; } } KASSERT(mc_list.count == 0, ("%s: mc_list %p count %d != 0\n", __func__, &mc_list, mc_list.count)); } static enum ieee80211_bss_changed lkpi_update_dtim_tsf(struct ieee80211_vif *vif, struct ieee80211_node *ni, struct ieee80211vap *vap, const char *_f, int _l) { enum ieee80211_bss_changed bss_changed; bss_changed = 0; #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE) printf("%s:%d [%s:%d] assoc %d aid %d beacon_int %u " "dtim_period %u sync_dtim_count %u sync_tsf %ju " "sync_device_ts %u bss_changed %#010jx\n", __func__, __LINE__, _f, _l, vif->cfg.assoc, vif->cfg.aid, vif->bss_conf.beacon_int, vif->bss_conf.dtim_period, vif->bss_conf.sync_dtim_count, (uintmax_t)vif->bss_conf.sync_tsf, vif->bss_conf.sync_device_ts, (uintmax_t)bss_changed); #endif if (vif->bss_conf.beacon_int != ni->ni_intval) { vif->bss_conf.beacon_int = ni->ni_intval; /* iwlwifi FW bug workaround; iwl_mvm_mac_sta_state. */ if (vif->bss_conf.beacon_int < 16) vif->bss_conf.beacon_int = 16; bss_changed |= BSS_CHANGED_BEACON_INT; } if (vif->bss_conf.dtim_period != vap->iv_dtim_period && vap->iv_dtim_period > 0) { vif->bss_conf.dtim_period = vap->iv_dtim_period; bss_changed |= BSS_CHANGED_BEACON_INFO; } vif->bss_conf.sync_dtim_count = vap->iv_dtim_count; vif->bss_conf.sync_tsf = le64toh(ni->ni_tstamp.tsf); /* vif->bss_conf.sync_device_ts = set in linuxkpi_ieee80211_rx. 
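	 * Together with sync_tsf and sync_dtim_count this gives the driver a
	 * beacon timing reference, presumably so firmware (e.g., iwlwifi's)
	 * can align power save and DTIM handling.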
	 */

#ifdef LINUXKPI_DEBUG_80211
	if (linuxkpi_debug_80211 & D80211_TRACE)
		printf("%s:%d [%s:%d] assoc %d aid %d beacon_int %u "
		    "dtim_period %u sync_dtim_count %u sync_tsf %ju "
		    "sync_device_ts %u bss_changed %#010jx\n",
		    __func__, __LINE__, _f, _l,
		    vif->cfg.assoc, vif->cfg.aid,
		    vif->bss_conf.beacon_int, vif->bss_conf.dtim_period,
		    vif->bss_conf.sync_dtim_count,
		    (uintmax_t)vif->bss_conf.sync_tsf,
		    vif->bss_conf.sync_device_ts,
		    (uintmax_t)bss_changed);
#endif

	return (bss_changed);
}

static void
lkpi_stop_hw_scan(struct lkpi_hw *lhw, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw;
	int error;
	bool cancel;

	LKPI_80211_LHW_SCAN_LOCK(lhw);
	cancel = (lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0;
	LKPI_80211_LHW_SCAN_UNLOCK(lhw);
	if (!cancel)
		return;

	hw = LHW_TO_HW(lhw);

	IEEE80211_UNLOCK(lhw->ic);
	wiphy_lock(hw->wiphy);
	/* Need to cancel the scan. */
	lkpi_80211_mo_cancel_hw_scan(hw, vif);
	wiphy_unlock(hw->wiphy);

	/* Need to make sure we see ieee80211_scan_completed. */
	LKPI_80211_LHW_SCAN_LOCK(lhw);
	if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0)
		error = msleep(lhw, &lhw->scan_mtx, 0, "lhwscanstop", hz/2);
	cancel = (lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0;
	LKPI_80211_LHW_SCAN_UNLOCK(lhw);
	IEEE80211_LOCK(lhw->ic);

	if (cancel)
		ic_printf(lhw->ic, "%s: failed to cancel scan: %d (%p, %p)\n",
		    __func__, error, lhw, vif);
}

static void
lkpi_hw_conf_idle(struct ieee80211_hw *hw, bool new)
{
	struct lkpi_hw *lhw;
	int error;
	bool old;

	old = hw->conf.flags & IEEE80211_CONF_IDLE;
	if (old == new)
		return;

	hw->conf.flags ^= IEEE80211_CONF_IDLE;
	error = lkpi_80211_mo_config(hw, IEEE80211_CONF_CHANGE_IDLE);
	if (error != 0 && error != EOPNOTSUPP) {
		lhw = HW_TO_LHW(hw);
		ic_printf(lhw->ic, "ERROR: %s: config %#0x returned %d\n",
		    __func__, IEEE80211_CONF_CHANGE_IDLE, error);
	}
}

static enum ieee80211_bss_changed
lkpi_disassoc(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
    struct lkpi_hw *lhw)
{
	enum ieee80211_bss_changed changed;

	changed = 0;
	sta->aid = 0;
	if (vif->cfg.assoc) {

		lhw->update_mc = true;
		lkpi_update_mcast_filter(lhw->ic, true);

		vif->cfg.assoc = false;
		vif->cfg.aid = 0;
		changed |= BSS_CHANGED_ASSOC;
		IMPROVE();

		/*
		 * Executing the bss_info_changed(BSS_CHANGED_ASSOC) with
		 * assoc = false right away here will remove the sta from
		 * firmware for iwlwifi.
		 * We no longer do this but only return the BSS_CHANGED value.
		 * The caller is responsible for removing the sta going to
		 * IEEE80211_STA_NOTEXIST and then executing the
		 * bss_info_changed() update.
		 * See lkpi_sta_run_to_init() for a more detailed comment.
		 */
	}

	return (changed);
}

static void
lkpi_wake_tx_queues(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
    bool dequeue_seen, bool no_emptyq)
{
	struct lkpi_txq *ltxq;
	int tid;
	bool ltxq_empty;

	/* Wake up all queues to know they are allocated in the driver. */
	for (tid = 0; tid < nitems(sta->txq); tid++) {

		if (tid == IEEE80211_NUM_TIDS) {
			IMPROVE("station specific?");
			if (!ieee80211_hw_check(hw, STA_MMPDU_TXQ))
				continue;
		} else if (tid >= hw->queues)
			continue;

		if (sta->txq[tid] == NULL)
			continue;

		ltxq = TXQ_TO_LTXQ(sta->txq[tid]);
		if (dequeue_seen && !ltxq->seen_dequeue)
			continue;

		LKPI_80211_LTXQ_LOCK(ltxq);
		ltxq_empty = skb_queue_empty(&ltxq->skbq);
		LKPI_80211_LTXQ_UNLOCK(ltxq);
		if (no_emptyq && ltxq_empty)
			continue;

		lkpi_80211_mo_wake_tx_queue(hw, sta->txq[tid]);
	}
}

/*
 * On the way down from RUN -> ASSOC -> AUTH we may send a DISASSOC or DEAUTH
 * packet.  The problem is that the state machine functions tend to hold the
 * LHW lock which will prevent lkpi_80211_txq_tx_one() from sending the packet.
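 * (Deferring to the txq task would simply block on the same lock, hence the
 * manual send below.)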
 * We call this after dropping the ic lock and before acquiring the LHW lock.
 * We make sure no further packets get queued and that any queued task either
 * finishes or is cancelled.  If a packet is left at the end we send it
 * manually.  scan_to_auth() would re-enable sending if the lsta were
 * re-used.
 */
static void
lkpi_80211_flush_tx(struct lkpi_hw *lhw, struct lkpi_sta *lsta)
{
	struct ieee80211_hw *hw;
	struct mbufq mq;
	struct mbuf *m;
	int len;

	/* There is no lockdep_assert_not_held_wiphy(). */
	hw = LHW_TO_HW(lhw);
	lockdep_assert_not_held(&hw->wiphy->mtx);

	/* Do not accept any new packets until scan_to_auth or lsta_free(). */
	LKPI_80211_LSTA_TXQ_LOCK(lsta);
	lsta->txq_ready = false;
	LKPI_80211_LSTA_TXQ_UNLOCK(lsta);

	while (taskqueue_cancel(taskqueue_thread, &lsta->txq_task, NULL) != 0)
		taskqueue_drain(taskqueue_thread, &lsta->txq_task);

	LKPI_80211_LSTA_TXQ_LOCK(lsta);
	len = mbufq_len(&lsta->txq);
	if (len <= 0) {
		LKPI_80211_LSTA_TXQ_UNLOCK(lsta);
		return;
	}

	mbufq_init(&mq, IFQ_MAXLEN);
	mbufq_concat(&mq, &lsta->txq);
	LKPI_80211_LSTA_TXQ_UNLOCK(lsta);

	m = mbufq_dequeue(&mq);
	while (m != NULL) {
		lkpi_80211_txq_tx_one(lsta, m);
		m = mbufq_dequeue(&mq);
	}
}

static void
lkpi_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct ieee80211_chanctx_conf *chanctx_conf;
	struct lkpi_chanctx *lchanctx;

	chanctx_conf = rcu_dereference_protected(vif->bss_conf.chanctx_conf,
	    lockdep_is_held(&hw->wiphy->mtx));

	if (chanctx_conf == NULL)
		return;

	/* Remove vif context. */
	lkpi_80211_mo_unassign_vif_chanctx(hw, vif, &vif->bss_conf, chanctx_conf);

	lkpi_hw_conf_idle(hw, true);

	/* Remove chan ctx. */
	lkpi_80211_mo_remove_chanctx(hw, chanctx_conf);

	/* Cleanup. */
	rcu_assign_pointer(vif->bss_conf.chanctx_conf, NULL);
	lchanctx = CHANCTX_CONF_TO_LCHANCTX(chanctx_conf);
	list_del(&lchanctx->entry);
	free(lchanctx, M_LKPI80211);
}

/* -------------------------------------------------------------------------- */

static int
lkpi_sta_state_do_nada(struct ieee80211vap *vap, enum ieee80211_state nstate,
    int arg)
{

	return (0);
}

/* lkpi_iv_newstate() handles the stop scan case generally. */
#define	lkpi_sta_scan_to_init(_v, _n, _a)	lkpi_sta_state_do_nada(_v, _n, _a)

static int
lkpi_sta_scan_to_auth(struct ieee80211vap *vap, enum ieee80211_state nstate,
    int arg)
{
	struct linuxkpi_ieee80211_channel *chan;
	struct lkpi_chanctx *lchanctx;
	struct ieee80211_chanctx_conf *chanctx_conf;
	struct lkpi_hw *lhw;
	struct ieee80211_hw *hw;
	struct lkpi_vif *lvif;
	struct ieee80211_vif *vif;
	struct ieee80211_node *ni;
	struct lkpi_sta *lsta;
	enum ieee80211_bss_changed bss_changed;
	struct ieee80211_prep_tx_info prep_tx_info;
	uint32_t changed;
	int error;

	/*
	 * In here we use vap->iv_bss until lvif->lvif_bss is set.
	 * For all later (STATE >= AUTH) functions we need to use the lvif
	 * cache which will be tracked even through (*iv_update_bss)().
	 */

	if (vap->iv_bss == NULL) {
		ic_printf(vap->iv_ic, "%s: no iv_bss for vap %p\n", __func__, vap);
		return (EINVAL);
	}
	/*
	 * Keep the ni alive locally.  In theory (and practice) iv_bss can change
	 * once we unlock here.  This is due to net80211 allowing state changes
	 * and new join1() despite having an active node as well as due to
	 * the fact that the iv_bss can be swapped under the hood in
	 * (*iv_update_bss).
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	if (ni->ni_chan == NULL || ni->ni_chan == IEEE80211_CHAN_ANYC) {
		ic_printf(vap->iv_ic, "%s: no channel set for iv_bss ni %p "
		    "on vap %p\n", __func__, ni, vap);
		ieee80211_free_node(ni);	/* Error handling for the local ni.
*/ return (EINVAL); } lhw = vap->iv_ic->ic_softc; chan = lkpi_find_lkpi80211_chan(lhw, ni->ni_chan); if (chan == NULL) { ic_printf(vap->iv_ic, "%s: failed to get LKPI channel from " "iv_bss ni %p on vap %p\n", __func__, ni, vap); ieee80211_free_node(ni); /* Error handling for the local ni. */ return (ESRCH); } hw = LHW_TO_HW(lhw); lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); LKPI_80211_LVIF_LOCK(lvif); /* XXX-BZ KASSERT later? */ if (lvif->lvif_bss_synched || lvif->lvif_bss != NULL) { ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p " "lvif_bss->ni %p synched %d\n", __func__, __LINE__, lvif, vap, vap->iv_bss, lvif->lvif_bss, (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL, lvif->lvif_bss_synched); LKPI_80211_LVIF_UNLOCK(lvif); ieee80211_free_node(ni); /* Error handling for the local ni. */ return (EBUSY); } LKPI_80211_LVIF_UNLOCK(lvif); IEEE80211_UNLOCK(vap->iv_ic); wiphy_lock(hw->wiphy); /* Add chanctx (or if exists, change it). */ chanctx_conf = rcu_dereference_protected(vif->bss_conf.chanctx_conf, lockdep_is_held(&hw->wiphy->mtx)); if (chanctx_conf != NULL) { lchanctx = CHANCTX_CONF_TO_LCHANCTX(chanctx_conf); IMPROVE("diff changes for changed, working on live copy, rcu"); } else { /* Keep separate alloc as in Linux this is rcu managed? */ lchanctx = malloc(sizeof(*lchanctx) + hw->chanctx_data_size, M_LKPI80211, M_WAITOK | M_ZERO); chanctx_conf = &lchanctx->chanctx_conf; } chanctx_conf->rx_chains_static = 1; chanctx_conf->rx_chains_dynamic = 1; chanctx_conf->radar_enabled = (chan->flags & IEEE80211_CHAN_RADAR) ? true : false; chanctx_conf->def.chan = chan; chanctx_conf->def.width = NL80211_CHAN_WIDTH_20_NOHT; chanctx_conf->def.center_freq1 = ieee80211_get_channel_center_freq1(ni->ni_chan); chanctx_conf->def.center_freq2 = ieee80211_get_channel_center_freq2(ni->ni_chan); IMPROVE("Check vht_cap from band not just chan?"); KASSERT(ni->ni_chan != NULL && ni->ni_chan != IEEE80211_CHAN_ANYC, ("%s:%d: ni %p ni_chan %p\n", __func__, __LINE__, ni, ni->ni_chan)); #ifdef LKPI_80211_HT if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) chanctx_conf->def.width = NL80211_CHAN_WIDTH_40; else chanctx_conf->def.width = NL80211_CHAN_WIDTH_20; } #endif #ifdef LKPI_80211_VHT if (IEEE80211_IS_CHAN_VHT_5GHZ(ni->ni_chan)) { #ifdef __notyet__ if (IEEE80211_IS_CHAN_VHT80P80(ni->ni_chan)) chanctx_conf->def.width = NL80211_CHAN_WIDTH_80P80; else if (IEEE80211_IS_CHAN_VHT160(ni->ni_chan)) chanctx_conf->def.width = NL80211_CHAN_WIDTH_160; else #endif if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) chanctx_conf->def.width = NL80211_CHAN_WIDTH_80; } #endif chanctx_conf->rx_chains_dynamic = lkpi_get_max_rx_chains(ni); /* Responder ... */ #if 0 chanctx_conf->min_def.chan = chanctx_conf->def.chan; chanctx_conf->min_def.width = NL80211_CHAN_WIDTH_20_NOHT; #ifdef LKPI_80211_HT if (IEEE80211_IS_CHAN_HT(ni->ni_chan) || IEEE80211_IS_CHAN_VHT(ni->ni_chan)) chanctx_conf->min_def.width = NL80211_CHAN_WIDTH_20; #endif chanctx_conf->min_def.center_freq1 = chanctx_conf->def.center_freq1; chanctx_conf->min_def.center_freq2 = chanctx_conf->def.center_freq2; #else chanctx_conf->min_def = chanctx_conf->def; #endif /* Set bss info (bss_info_changed). */ bss_changed = 0; vif->bss_conf.bssid = ni->ni_bssid; bss_changed |= BSS_CHANGED_BSSID; vif->bss_conf.txpower = ni->ni_txpower; bss_changed |= BSS_CHANGED_TXPOWER; vif->cfg.idle = false; bss_changed |= BSS_CHANGED_IDLE; /* vif->bss_conf.basic_rates ? Where exactly? */ /* Should almost assert it is this. 
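	 * (We are only going SCAN -> AUTH here, so not associated yet.)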
	 */
	vif->cfg.assoc = false;
	vif->cfg.aid = 0;

	bss_changed |= lkpi_update_dtim_tsf(vif, ni, vap, __func__, __LINE__);

	error = 0;
	if (vif->bss_conf.chanctx_conf == chanctx_conf) {
		changed = IEEE80211_CHANCTX_CHANGE_MIN_WIDTH;
		changed |= IEEE80211_CHANCTX_CHANGE_RADAR;
		changed |= IEEE80211_CHANCTX_CHANGE_RX_CHAINS;
		changed |= IEEE80211_CHANCTX_CHANGE_WIDTH;
		lkpi_80211_mo_change_chanctx(hw, chanctx_conf, changed);
	} else {
		error = lkpi_80211_mo_add_chanctx(hw, chanctx_conf);
		if (error == 0 || error == EOPNOTSUPP) {
			vif->bss_conf.chanreq.oper.chan = chanctx_conf->def.chan;
			vif->bss_conf.chanreq.oper.width = chanctx_conf->def.width;
			vif->bss_conf.chanreq.oper.center_freq1 =
			    chanctx_conf->def.center_freq1;
			vif->bss_conf.chanreq.oper.center_freq2 =
			    chanctx_conf->def.center_freq2;
		} else {
			ic_printf(vap->iv_ic, "%s:%d: mo_add_chanctx "
			    "failed: %d\n", __func__, __LINE__, error);
			goto out;
		}

		list_add_rcu(&lchanctx->entry, &lhw->lchanctx_list);
		rcu_assign_pointer(vif->bss_conf.chanctx_conf, chanctx_conf);

		/* Assign vif chanctx. */
		if (error == 0)
			error = lkpi_80211_mo_assign_vif_chanctx(hw, vif,
			    &vif->bss_conf, chanctx_conf);
		if (error == EOPNOTSUPP)
			error = 0;
		if (error != 0) {
			ic_printf(vap->iv_ic, "%s:%d: mo_assign_vif_chanctx "
			    "failed: %d\n", __func__, __LINE__, error);
			lkpi_80211_mo_remove_chanctx(hw, chanctx_conf);
			rcu_assign_pointer(vif->bss_conf.chanctx_conf, NULL);
			lchanctx = CHANCTX_CONF_TO_LCHANCTX(chanctx_conf);
			list_del(&lchanctx->entry);
			free(lchanctx, M_LKPI80211);
			goto out;
		}
	}
	IMPROVE("update radiotap chan fields too");

	/* RATES */
	IMPROVE("bss info: not all needs to come now and rates are missing");
	lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);

	/*
	 * Given ni and lsta are 1:1 from alloc to free we can assert that
	 * ni always has lsta data attached despite net80211 node swapping
	 * under the hood.
	 */
	KASSERT(ni->ni_drv_data != NULL, ("%s: ni %p ni_drv_data %p\n",
	    __func__, ni, ni->ni_drv_data));
	lsta = ni->ni_drv_data;

	/*
	 * Make sure in case the sta did not change and we re-add it,
	 * that we can tx again.
	 */
	LKPI_80211_LSTA_TXQ_LOCK(lsta);
	lsta->txq_ready = true;
	LKPI_80211_LSTA_TXQ_UNLOCK(lsta);

	/* Insert the [l]sta into the list of known stations. */
	list_add_tail(&lsta->lsta_list, &lvif->lsta_list);

	/* Add (or adjust) sta and change state (from NOTEXIST) to NONE. */
	KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
	KASSERT(lsta->state == IEEE80211_STA_NOTEXIST, ("%s: lsta %p state not "
	    "NOTEXIST: %#x\n", __func__, lsta, lsta->state));
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NONE);
	if (error != 0) {
		IMPROVE("do we need to undo the chan ctx?");
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NONE) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}
#if 0
	lsta->added_to_drv = true;	/* mo manages. */
#endif

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);

#if 0
	/*
	 * Wakeup all queues now that sta is there so we have as much time to
	 * possibly prepare the queue in the driver to be ready for the 1st
	 * packet;  lkpi_80211_txq_tx_one() still has a workaround as there
	 * is no guarantee or way to check.
	 * XXX-BZ and by now we know that this does not work on all drivers
	 * for all queues.
	 */
	lkpi_wake_tx_queues(hw, LSTA_TO_STA(lsta), false, false);
#endif

	/* Start mgd_prepare_tx. */
	memset(&prep_tx_info, 0, sizeof(prep_tx_info));
	prep_tx_info.duration = PREP_TX_INFO_DURATION;
	lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info);
	lsta->in_mgd = true;

	/*
	 * What is going to happen next:
	 * - ..
we should end up in "auth_to_assoc" * - event_callback * - update sta_state (NONE to AUTH) * - mgd_complete_tx * (ideally we'd do that on a callback for something else ...) */ wiphy_unlock(hw->wiphy); IEEE80211_LOCK(vap->iv_ic); LKPI_80211_LVIF_LOCK(lvif); /* Re-check given (*iv_update_bss) could have happened while we were unlocked. */ if (lvif->lvif_bss_synched || lvif->lvif_bss != NULL || lsta->ni != vap->iv_bss) ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p " "lvif_bss->ni %p synched %d, ni %p lsta %p\n", __func__, __LINE__, lvif, vap, vap->iv_bss, lvif->lvif_bss, (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL, lvif->lvif_bss_synched, ni, lsta); /* * Reference the "ni" for caching the lsta/ni in lvif->lvif_bss. * Given we cache lsta we use lsta->ni instead of ni here (even though * lsta->ni == ni) to be distinct from the rest of the code where we do * assume that ni == vap->iv_bss which it may or may not be. * So do NOT use iv_bss here anymore as that may have diverged from our * function local ni already while ic was unlocked and would lead to * inconsistencies. Go and see if we lost a race and do not update * lvif_bss_synched in that case. */ ieee80211_ref_node(lsta->ni); lvif->lvif_bss = lsta; if (lsta->ni == vap->iv_bss) { lvif->lvif_bss_synched = true; } else { /* Set to un-synched no matter what. */ lvif->lvif_bss_synched = false; /* * We do not error as someone has to take us down. * If we are followed by a 2nd, new net80211::join1() going to * AUTH lkpi_sta_a_to_a() will error, lkpi_sta_auth_to_{scan,init}() * will take the lvif->lvif_bss node down eventually. * What happens with the vap->iv_bss node will entirely be up * to net80211 as we never used the node beyond alloc()/free() * and we do not hold an extra reference for that anymore given * ni : lsta == 1:1. */ } LKPI_80211_LVIF_UNLOCK(lvif); goto out_relocked; out: wiphy_unlock(hw->wiphy); IEEE80211_LOCK(vap->iv_ic); out_relocked: /* * Release the reference that kept the ni stable locally * during the work of this function. */ if (ni != NULL) ieee80211_free_node(ni); return (error); } static int lkpi_sta_auth_to_scan(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct ieee80211_node *ni; struct lkpi_sta *lsta; struct ieee80211_sta *sta; struct ieee80211_prep_tx_info prep_tx_info; int error; lhw = vap->iv_ic->ic_softc; hw = LHW_TO_HW(lhw); lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); LKPI_80211_LVIF_LOCK(lvif); #ifdef LINUXKPI_DEBUG_80211 /* XXX-BZ KASSERT later; state going down so no action. */ if (lvif->lvif_bss == NULL) ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p " "lvif_bss->ni %p synched %d\n", __func__, __LINE__, lvif, vap, vap->iv_bss, lvif->lvif_bss, (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL, lvif->lvif_bss_synched); #endif lsta = lvif->lvif_bss; LKPI_80211_LVIF_UNLOCK(lvif); KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p " "lvif %p vap %p\n", __func__, lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap)); ni = lsta->ni; /* Reference held for lvif_bss. */ sta = LSTA_TO_STA(lsta); lkpi_lsta_dump(lsta, ni, __func__, __LINE__); IEEE80211_UNLOCK(vap->iv_ic); wiphy_lock(hw->wiphy); /* flush, drop. */ lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), true); /* Wake tx queues to get packet(s) out. 
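	 * (The final "true" is no_emptyq: only queues which actually hold
	 * frames are woken; see lkpi_wake_tx_queues().)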
	 */
	lkpi_wake_tx_queues(hw, sta, false, true);

	/* flush, no drop */
	lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), false);

	/* End mgd_complete_tx. */
	if (lsta->in_mgd) {
		memset(&prep_tx_info, 0, sizeof(prep_tx_info));
		prep_tx_info.success = false;
		lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
		lsta->in_mgd = false;
	}

	/* sync_rx_queues */
	lkpi_80211_mo_sync_rx_queues(hw);

	/* sta_pre_rcu_remove */
	lkpi_80211_mo_sta_pre_rcu_remove(hw, vif, sta);

	/* Take the station down. */

	/* Adjust sta and change state (from NONE) to NOTEXIST. */
	KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
	KASSERT(lsta->state == IEEE80211_STA_NONE, ("%s: lsta %p state not "
	    "NONE: %#x, nstate %d arg %d\n", __func__, lsta, lsta->state, nstate, arg));
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NOTEXIST);
	if (error != 0) {
		IMPROVE("do we need to undo the chan ctx?");
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NOTEXIST) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}
#if 0
	lsta->added_to_drv = false;	/* mo manages. */
#endif

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);

	LKPI_80211_LVIF_LOCK(lvif);
	/* Remove ni reference for this cache of lsta. */
	lvif->lvif_bss = NULL;
	lvif->lvif_bss_synched = false;
	LKPI_80211_LVIF_UNLOCK(lvif);
	lkpi_lsta_remove(lsta, lvif);
	/*
	 * At the very last, release the reference on the ni for the ni/lsta
	 * on lvif->lvif_bss.  Upon return from this both ni and lsta are
	 * invalid and potentially freed.
	 */
	ieee80211_free_node(ni);

	/* conf_tx */

	lkpi_remove_chanctx(hw, vif);

out:
	wiphy_unlock(hw->wiphy);
	IEEE80211_LOCK(vap->iv_ic);
	return (error);
}

static int
lkpi_sta_auth_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate,
    int arg)
{
	int error;

	error = lkpi_sta_auth_to_scan(vap, nstate, arg);
	if (error == 0)
		error = lkpi_sta_scan_to_init(vap, nstate, arg);
	return (error);
}

static int
lkpi_sta_auth_to_assoc(struct ieee80211vap *vap, enum ieee80211_state nstate,
    int arg)
{
	struct lkpi_hw *lhw;
	struct ieee80211_hw *hw;
	struct lkpi_vif *lvif;
	struct ieee80211_vif *vif;
	struct lkpi_sta *lsta;
	struct ieee80211_prep_tx_info prep_tx_info;
	int error;

	lhw = vap->iv_ic->ic_softc;
	hw = LHW_TO_HW(lhw);
	lvif = VAP_TO_LVIF(vap);
	vif = LVIF_TO_VIF(lvif);

	IEEE80211_UNLOCK(vap->iv_ic);
	wiphy_lock(hw->wiphy);

	LKPI_80211_LVIF_LOCK(lvif);
	/* XXX-BZ KASSERT later? */
	if (!lvif->lvif_bss_synched || lvif->lvif_bss == NULL) {
#ifdef LINUXKPI_DEBUG_80211
		ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
		    "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
		    lvif, vap, vap->iv_bss, lvif->lvif_bss,
		    (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
		    lvif->lvif_bss_synched);
#endif
		error = ENOTRECOVERABLE;
		LKPI_80211_LVIF_UNLOCK(lvif);
		goto out;
	}
	lsta = lvif->lvif_bss;
	LKPI_80211_LVIF_UNLOCK(lvif);

	KASSERT(lsta != NULL, ("%s: lsta %p\n", __func__, lsta));

	/* Finish auth. */
	IMPROVE("event callback");

	/* Update sta_state (NONE to AUTH). */
	KASSERT(lsta->state == IEEE80211_STA_NONE, ("%s: lsta %p state not "
	    "NONE: %#x\n", __func__, lsta, lsta->state));
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_AUTH);
	if (error != 0) {
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(AUTH) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}

	/* End mgd_complete_tx. */
	if (lsta->in_mgd) {
		memset(&prep_tx_info, 0, sizeof(prep_tx_info));
		prep_tx_info.success = true;
		lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
		lsta->in_mgd = false;
	}

	/* Now start assoc. */

	/* Start mgd_prepare_tx.
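	 * (unless we are still inside the management TX window from AUTH).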
*/ if (!lsta->in_mgd) { memset(&prep_tx_info, 0, sizeof(prep_tx_info)); prep_tx_info.duration = PREP_TX_INFO_DURATION; lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info); lsta->in_mgd = true; } /* Wake tx queue to get packet out. */ lkpi_wake_tx_queues(hw, LSTA_TO_STA(lsta), false, true); /* * .. we end up in "assoc_to_run" * - update sta_state (AUTH to ASSOC) * - conf_tx [all] * - bss_info_changed (assoc, aid, ssid, ..) * - change_chanctx (if needed) * - event_callback * - mgd_complete_tx */ out: wiphy_unlock(hw->wiphy); IEEE80211_LOCK(vap->iv_ic); return (error); } /* auth_to_auth, assoc_to_assoc. */ static int lkpi_sta_a_to_a(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct lkpi_sta *lsta; struct ieee80211_prep_tx_info prep_tx_info; int error; lhw = vap->iv_ic->ic_softc; hw = LHW_TO_HW(lhw); lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); IEEE80211_UNLOCK(vap->iv_ic); wiphy_lock(hw->wiphy); LKPI_80211_LVIF_LOCK(lvif); /* XXX-BZ KASSERT later? */ if (!lvif->lvif_bss_synched || lvif->lvif_bss == NULL) { #ifdef LINUXKPI_DEBUG_80211 ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p " "lvif_bss->ni %p synched %d\n", __func__, __LINE__, lvif, vap, vap->iv_bss, lvif->lvif_bss, (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL, lvif->lvif_bss_synched); #endif LKPI_80211_LVIF_UNLOCK(lvif); error = ENOTRECOVERABLE; goto out; } lsta = lvif->lvif_bss; LKPI_80211_LVIF_UNLOCK(lvif); KASSERT(lsta != NULL, ("%s: lsta %p! lvif %p vap %p\n", __func__, lsta, lvif, vap)); IMPROVE("event callback?"); /* End mgd_complete_tx. */ if (lsta->in_mgd) { memset(&prep_tx_info, 0, sizeof(prep_tx_info)); prep_tx_info.success = false; lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info); lsta->in_mgd = false; } /* Now start assoc. */ /* Start mgd_prepare_tx. */ if (!lsta->in_mgd) { memset(&prep_tx_info, 0, sizeof(prep_tx_info)); prep_tx_info.duration = PREP_TX_INFO_DURATION; lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info); lsta->in_mgd = true; } error = 0; out: wiphy_unlock(hw->wiphy); IEEE80211_LOCK(vap->iv_ic); return (error); } static int _lkpi_sta_assoc_to_down(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct ieee80211_node *ni; struct lkpi_sta *lsta; struct ieee80211_sta *sta; struct ieee80211_prep_tx_info prep_tx_info; enum ieee80211_bss_changed bss_changed; int error; lhw = vap->iv_ic->ic_softc; hw = LHW_TO_HW(lhw); lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); IEEE80211_UNLOCK(vap->iv_ic); wiphy_lock(hw->wiphy); LKPI_80211_LVIF_LOCK(lvif); #ifdef LINUXKPI_DEBUG_80211 /* XXX-BZ KASSERT later; state going down so no action. */ if (lvif->lvif_bss == NULL) ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p " "lvif_bss->ni %p synched %d\n", __func__, __LINE__, lvif, vap, vap->iv_bss, lvif->lvif_bss, (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL, lvif->lvif_bss_synched); #endif lsta = lvif->lvif_bss; LKPI_80211_LVIF_UNLOCK(lvif); KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p " "lvif %p vap %p\n", __func__, lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap)); ni = lsta->ni; /* Reference held for lvif_bss. */ sta = LSTA_TO_STA(lsta); lkpi_lsta_dump(lsta, ni, __func__, __LINE__); /* flush, drop. 
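	 * (drop=true: discard anything still queued in the driver; a second
	 * "flush, no drop" pass follows further down.)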
	 */
	lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), true);

	IMPROVE("What are the proper conditions for DEAUTH_NEED_MGD_TX_PREP?");
	if (ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP) &&
	    !lsta->in_mgd) {
		memset(&prep_tx_info, 0, sizeof(prep_tx_info));
		prep_tx_info.duration = PREP_TX_INFO_DURATION;
		prep_tx_info.was_assoc = true;
		lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info);
		lsta->in_mgd = true;
	}

	wiphy_unlock(hw->wiphy);
	IEEE80211_LOCK(vap->iv_ic);

	/* Call iv_newstate first so we get potential DEAUTH packet out. */
	error = lvif->iv_newstate(vap, nstate, arg);
	if (error != 0) {
		ic_printf(vap->iv_ic, "%s:%d: iv_newstate(%p, %d, %d) "
		    "failed: %d\n", __func__, __LINE__, vap, nstate, arg, error);
		goto outni;
	}

	IEEE80211_UNLOCK(vap->iv_ic);

	/* Ensure the packets get out. */
	lkpi_80211_flush_tx(lhw, lsta);

	wiphy_lock(hw->wiphy);

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);

	/* Wake tx queues to get packet(s) out. */
	lkpi_wake_tx_queues(hw, sta, false, true);

	/* flush, no drop */
	lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), false);

	/* End mgd_complete_tx. */
	if (lsta->in_mgd) {
		memset(&prep_tx_info, 0, sizeof(prep_tx_info));
		prep_tx_info.success = false;
		prep_tx_info.was_assoc = true;
		lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info);
		lsta->in_mgd = false;
	}

	/* sync_rx_queues */
	lkpi_80211_mo_sync_rx_queues(hw);

	/* sta_pre_rcu_remove */
	lkpi_80211_mo_sta_pre_rcu_remove(hw, vif, sta);

	/* Take the station down. */

	/* Update sta and change state (from AUTH) to NONE. */
	KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
	KASSERT(lsta->state == IEEE80211_STA_AUTH, ("%s: lsta %p state not "
	    "AUTH: %#x\n", __func__, lsta, lsta->state));
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NONE);
	if (error != 0) {
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NONE) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}

	/* See comment in lkpi_sta_run_to_init(). */
	bss_changed = 0;
	bss_changed |= lkpi_disassoc(sta, vif, lhw);

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);

	/* Adjust sta and change state (from NONE) to NOTEXIST. */
	KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni));
	KASSERT(lsta->state == IEEE80211_STA_NONE, ("%s: lsta %p state not "
	    "NONE: %#x, nstate %d arg %d\n", __func__, lsta, lsta->state, nstate, arg));
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NOTEXIST);
	if (error != 0) {
		IMPROVE("do we need to undo the chan ctx?");
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NOTEXIST) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}

	lkpi_lsta_dump(lsta, ni, __func__, __LINE__);	/* sta no longer safe to use. */

	IMPROVE("Any bss_info changes to announce?");
	vif->bss_conf.qos = 0;
	bss_changed |= BSS_CHANGED_QOS;
	vif->cfg.ssid_len = 0;
	memset(vif->cfg.ssid, '\0', sizeof(vif->cfg.ssid));
	bss_changed |= BSS_CHANGED_BSSID;
	lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed);

	LKPI_80211_LVIF_LOCK(lvif);
	/* Remove ni reference for this cache of lsta. */
	lvif->lvif_bss = NULL;
	lvif->lvif_bss_synched = false;
	LKPI_80211_LVIF_UNLOCK(lvif);
	lkpi_lsta_remove(lsta, lvif);
	/*
	 * At the very last, release the reference on the ni for the ni/lsta
	 * on lvif->lvif_bss.  Upon return from this both ni and lsta are
	 * invalid and potentially freed.
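	 * Do not dereference either of them past this point.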
	 */
	ieee80211_free_node(ni);

	/* conf_tx */

	lkpi_remove_chanctx(hw, vif);

	error = EALREADY;
out:
	wiphy_unlock(hw->wiphy);
	IEEE80211_LOCK(vap->iv_ic);
outni:
	return (error);
}

static int
lkpi_sta_assoc_to_auth(struct ieee80211vap *vap, enum ieee80211_state nstate,
    int arg)
{
	int error;

	error = _lkpi_sta_assoc_to_down(vap, nstate, arg);
	if (error != 0 && error != EALREADY)
		return (error);

	/* At this point iv_bss has long been a new node! */
	error |= lkpi_sta_scan_to_auth(vap, nstate, 0);

	return (error);
}

static int
lkpi_sta_assoc_to_scan(struct ieee80211vap *vap, enum ieee80211_state nstate,
    int arg)
{
	int error;

	error = _lkpi_sta_assoc_to_down(vap, nstate, arg);
	return (error);
}

static int
lkpi_sta_assoc_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate,
    int arg)
{
	int error;

	error = _lkpi_sta_assoc_to_down(vap, nstate, arg);
	return (error);
}

static int
lkpi_sta_assoc_to_run(struct ieee80211vap *vap, enum ieee80211_state nstate,
    int arg)
{
	struct lkpi_hw *lhw;
	struct ieee80211_hw *hw;
	struct lkpi_vif *lvif;
	struct ieee80211_vif *vif;
	struct ieee80211_node *ni;
	struct lkpi_sta *lsta;
	struct ieee80211_sta *sta;
	struct ieee80211_prep_tx_info prep_tx_info;
	enum ieee80211_bss_changed bss_changed;
	int error;

	lhw = vap->iv_ic->ic_softc;
	hw = LHW_TO_HW(lhw);
	lvif = VAP_TO_LVIF(vap);
	vif = LVIF_TO_VIF(lvif);

	IEEE80211_UNLOCK(vap->iv_ic);
	wiphy_lock(hw->wiphy);

	LKPI_80211_LVIF_LOCK(lvif);
	/* XXX-BZ KASSERT later? */
	if (!lvif->lvif_bss_synched || lvif->lvif_bss == NULL) {
#ifdef LINUXKPI_DEBUG_80211
		ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p "
		    "lvif_bss->ni %p synched %d\n", __func__, __LINE__,
		    lvif, vap, vap->iv_bss, lvif->lvif_bss,
		    (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL,
		    lvif->lvif_bss_synched);
#endif
		LKPI_80211_LVIF_UNLOCK(lvif);
		error = ENOTRECOVERABLE;
		goto out;
	}
	lsta = lvif->lvif_bss;
	LKPI_80211_LVIF_UNLOCK(lvif);

	KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p "
	    "lvif %p vap %p\n", __func__,
	    lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap));

	ni = lsta->ni;	/* Reference held for lvif_bss. */

	IMPROVE("ponder moving some of this to ic_newassoc, scan_assoc_success, "
	    "and to a lesser extent ieee80211_notify_node_join");

	/* Finish assoc. */
	/* Update sta_state (AUTH to ASSOC) and set aid. */
	KASSERT(lsta->state == IEEE80211_STA_AUTH, ("%s: lsta %p state not "
	    "AUTH: %#x\n", __func__, lsta, lsta->state));
	sta = LSTA_TO_STA(lsta);
	sta->aid = IEEE80211_NODE_AID(ni);
#ifdef LKPI_80211_WME
	if (vap->iv_flags & IEEE80211_F_WME)
		sta->wme = true;
#endif
	error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_ASSOC);
	if (error != 0) {
		ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(ASSOC) "
		    "failed: %d\n", __func__, __LINE__, error);
		goto out;
	}

	IMPROVE("wme / conf_tx [all]");

	/* Update bss info (bss_info_changed) (assoc, aid, ..). */
	bss_changed = 0;
#ifdef LKPI_80211_WME
	bss_changed |= lkpi_wme_update(lhw, vap, true);
#endif
	if (!vif->cfg.assoc || vif->cfg.aid != IEEE80211_NODE_AID(ni)) {
		vif->cfg.assoc = true;
		vif->cfg.aid = IEEE80211_NODE_AID(ni);
		bss_changed |= BSS_CHANGED_ASSOC;
	}
	/* We set SSID but this is not BSSID! */
	vif->cfg.ssid_len = ni->ni_esslen;
	memcpy(vif->cfg.ssid, ni->ni_essid, ni->ni_esslen);
	if ((vap->iv_flags & IEEE80211_F_SHPREAMBLE) !=
	    vif->bss_conf.use_short_preamble) {
		vif->bss_conf.use_short_preamble ^= 1;
		/* bss_changed |= BSS_CHANGED_??? */
	}
	if ((vap->iv_flags & IEEE80211_F_SHSLOT) !=
	    vif->bss_conf.use_short_slot) {
		vif->bss_conf.use_short_slot ^= 1;
		/* bss_changed |= BSS_CHANGED_???
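		 * (in mac80211 terms presumably BSS_CHANGED_ERP_SLOT;
		 * left unset here)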
*/ } if ((ni->ni_flags & IEEE80211_NODE_QOS) != vif->bss_conf.qos) { vif->bss_conf.qos ^= 1; bss_changed |= BSS_CHANGED_QOS; } bss_changed |= lkpi_update_dtim_tsf(vif, ni, vap, __func__, __LINE__); lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed); /* - change_chanctx (if needed) * - event_callback */ /* End mgd_complete_tx. */ if (lsta->in_mgd) { memset(&prep_tx_info, 0, sizeof(prep_tx_info)); prep_tx_info.success = true; lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info); lsta->in_mgd = false; } lkpi_hw_conf_idle(hw, false); /* * And then: * - (more packets)? * - set_key * - set_default_unicast_key * - set_key (?) * - ipv6_addr_change (?) */ /* Prepare_multicast && configure_filter. */ lhw->update_mc = true; lkpi_update_mcast_filter(vap->iv_ic, true); if (!ieee80211_node_is_authorized(ni)) { IMPROVE("net80211 does not consider node authorized"); } sta->deflink.rx_nss = MAX(1, sta->deflink.rx_nss); IMPROVE("Is this the right spot, has net80211 done all updates already?"); lkpi_sta_sync_from_ni(hw, vif, sta, ni, true); /* Update sta_state (ASSOC to AUTHORIZED). */ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni)); KASSERT(lsta->state == IEEE80211_STA_ASSOC, ("%s: lsta %p state not " "ASSOC: %#x\n", __func__, lsta, lsta->state)); error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_AUTHORIZED); if (error != 0) { IMPROVE("undo some changes?"); ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(AUTHORIZED) " "failed: %d\n", __func__, __LINE__, error); goto out; } /* - drv_config (?) * - bss_info_changed * - set_rekey_data (?) * * And now we should be passing packets. */ IMPROVE("Need that bssid setting, and the keys"); bss_changed = 0; bss_changed |= lkpi_update_dtim_tsf(vif, ni, vap, __func__, __LINE__); lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed); out: wiphy_unlock(hw->wiphy); IEEE80211_LOCK(vap->iv_ic); return (error); } static int lkpi_sta_auth_to_run(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { int error; error = lkpi_sta_auth_to_assoc(vap, nstate, arg); if (error == 0) error = lkpi_sta_assoc_to_run(vap, nstate, arg); return (error); } static int lkpi_sta_run_to_assoc(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct ieee80211_node *ni; struct lkpi_sta *lsta; struct ieee80211_sta *sta; struct ieee80211_prep_tx_info prep_tx_info; #if 0 enum ieee80211_bss_changed bss_changed; #endif int error; lhw = vap->iv_ic->ic_softc; hw = LHW_TO_HW(lhw); lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); LKPI_80211_LVIF_LOCK(lvif); #ifdef LINUXKPI_DEBUG_80211 /* XXX-BZ KASSERT later; state going down so no action. */ if (lvif->lvif_bss == NULL) ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p " "lvif_bss->ni %p synched %d\n", __func__, __LINE__, lvif, vap, vap->iv_bss, lvif->lvif_bss, (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL, lvif->lvif_bss_synched); #endif lsta = lvif->lvif_bss; LKPI_80211_LVIF_UNLOCK(lvif); KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p " "lvif %p vap %p\n", __func__, lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap)); ni = lsta->ni; /* Reference held for lvif_bss. */ sta = LSTA_TO_STA(lsta); lkpi_lsta_dump(lsta, ni, __func__, __LINE__); IEEE80211_UNLOCK(vap->iv_ic); wiphy_lock(hw->wiphy); /* flush, drop. 
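 * In mac80211 terms a (*flush)() with drop=true discards pending
 * frames, while the later drop=false call asks the driver to push
 * queued frames out to the air before we proceed.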
*/ lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), true); IMPROVE("What are the proper conditions for DEAUTH_NEED_MGD_TX_PREP?"); if (ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP) && !lsta->in_mgd) { memset(&prep_tx_info, 0, sizeof(prep_tx_info)); prep_tx_info.duration = PREP_TX_INFO_DURATION; prep_tx_info.was_assoc = true; lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info); lsta->in_mgd = true; } wiphy_unlock(hw->wiphy); IEEE80211_LOCK(vap->iv_ic); /* Call iv_newstate first so we get potential DISASSOC packet out. */ error = lvif->iv_newstate(vap, nstate, arg); if (error != 0) { ic_printf(vap->iv_ic, "%s:%d: iv_newstate(%p, %d, %d) " "failed: %d\n", __func__, __LINE__, vap, nstate, arg, error); goto outni; } IEEE80211_UNLOCK(vap->iv_ic); /* Ensure the packets get out. */ lkpi_80211_flush_tx(lhw, lsta); wiphy_lock(hw->wiphy); lkpi_lsta_dump(lsta, ni, __func__, __LINE__); /* Wake tx queues to get packet(s) out. */ lkpi_wake_tx_queues(hw, sta, false, true); /* flush, no drop */ lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), false); /* End mgd_complete_tx. */ if (lsta->in_mgd) { memset(&prep_tx_info, 0, sizeof(prep_tx_info)); prep_tx_info.success = false; prep_tx_info.was_assoc = true; lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info); lsta->in_mgd = false; } #if 0 /* sync_rx_queues */ lkpi_80211_mo_sync_rx_queues(hw); /* sta_pre_rcu_remove */ lkpi_80211_mo_sta_pre_rcu_remove(hw, vif, sta); #endif /* Take the station down. */ /* Adjust sta and change state (from AUTHORIZED) to ASSOC. */ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni)); KASSERT(lsta->state == IEEE80211_STA_AUTHORIZED, ("%s: lsta %p state not " "AUTHORIZED: %#x\n", __func__, lsta, lsta->state)); error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_ASSOC); if (error != 0) { ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(ASSOC) " "failed: %d\n", __func__, __LINE__, error); goto out; } lkpi_lsta_dump(lsta, ni, __func__, __LINE__); #ifdef LKPI_80211_HW_CRYPTO if (lkpi_hwcrypto) { error = lkpi_sta_del_keys(hw, vif, lsta); if (error != 0) { ic_printf(vap->iv_ic, "%s:%d: lkpi_sta_del_keys " "failed: %d\n", __func__, __LINE__, error); /* * Either drv/fw will crash or cleanup itself, * otherwise net80211 will delete the keys (at a * less appropriate time). */ /* goto out; */ } } #endif /* Update sta_state (ASSOC to AUTH). */ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni)); KASSERT(lsta->state == IEEE80211_STA_ASSOC, ("%s: lsta %p state not " "ASSOC: %#x\n", __func__, lsta, lsta->state)); error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_AUTH); if (error != 0) { ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(AUTH) " "failed: %d\n", __func__, __LINE__, error); goto out; } lkpi_lsta_dump(lsta, ni, __func__, __LINE__); #if 0 /* Update bss info (bss_info_changed) (assoc, aid, ..). 
*/ lkpi_disassoc(sta, vif, lhw); #endif error = EALREADY; out: wiphy_unlock(hw->wiphy); IEEE80211_LOCK(vap->iv_ic); outni: return (error); } static int lkpi_sta_run_to_init(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct ieee80211_node *ni; struct lkpi_sta *lsta; struct ieee80211_sta *sta; struct ieee80211_prep_tx_info prep_tx_info; enum ieee80211_bss_changed bss_changed; int error; lhw = vap->iv_ic->ic_softc; hw = LHW_TO_HW(lhw); lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); IEEE80211_UNLOCK(vap->iv_ic); wiphy_lock(hw->wiphy); LKPI_80211_LVIF_LOCK(lvif); #ifdef LINUXKPI_DEBUG_80211 /* XXX-BZ KASSERT later; state going down so no action. */ if (lvif->lvif_bss == NULL) ic_printf(vap->iv_ic, "%s:%d: lvif %p vap %p iv_bss %p lvif_bss %p " "lvif_bss->ni %p synched %d\n", __func__, __LINE__, lvif, vap, vap->iv_bss, lvif->lvif_bss, (lvif->lvif_bss != NULL) ? lvif->lvif_bss->ni : NULL, lvif->lvif_bss_synched); #endif lsta = lvif->lvif_bss; LKPI_80211_LVIF_UNLOCK(lvif); KASSERT(lsta != NULL && lsta->ni != NULL, ("%s: lsta %p ni %p " "lvif %p vap %p\n", __func__, lsta, (lsta != NULL) ? lsta->ni : NULL, lvif, vap)); ni = lsta->ni; /* Reference held for lvif_bss. */ sta = LSTA_TO_STA(lsta); lkpi_lsta_dump(lsta, ni, __func__, __LINE__); /* flush, drop. */ lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), true); IMPROVE("What are the proper conditions for DEAUTH_NEED_MGD_TX_PREP?"); if (ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP) && !lsta->in_mgd) { memset(&prep_tx_info, 0, sizeof(prep_tx_info)); prep_tx_info.duration = PREP_TX_INFO_DURATION; prep_tx_info.was_assoc = true; lkpi_80211_mo_mgd_prepare_tx(hw, vif, &prep_tx_info); lsta->in_mgd = true; } wiphy_unlock(hw->wiphy); IEEE80211_LOCK(vap->iv_ic); /* Call iv_newstate first so we get potential DISASSOC packet out. */ error = lvif->iv_newstate(vap, nstate, arg); if (error != 0) { ic_printf(vap->iv_ic, "%s:%d: iv_newstate(%p, %d, %d) " "failed: %d\n", __func__, __LINE__, vap, nstate, arg, error); goto outni; } IEEE80211_UNLOCK(vap->iv_ic); /* Ensure the packets get out. */ lkpi_80211_flush_tx(lhw, lsta); wiphy_lock(hw->wiphy); lkpi_lsta_dump(lsta, ni, __func__, __LINE__); /* Wake tx queues to get packet(s) out. */ lkpi_wake_tx_queues(hw, sta, false, true); /* flush, no drop */ lkpi_80211_mo_flush(hw, vif, nitems(sta->txq), false); /* End mgd_complete_tx. */ if (lsta->in_mgd) { memset(&prep_tx_info, 0, sizeof(prep_tx_info)); prep_tx_info.success = false; prep_tx_info.was_assoc = true; lkpi_80211_mo_mgd_complete_tx(hw, vif, &prep_tx_info); lsta->in_mgd = false; } /* sync_rx_queues */ lkpi_80211_mo_sync_rx_queues(hw); /* sta_pre_rcu_remove */ lkpi_80211_mo_sta_pre_rcu_remove(hw, vif, sta); /* Take the station down. */ /* Adjust sta and change state (from AUTHORIZED) to ASSOC. 
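 *
 * mac80211 moves station state one step at a time along the ladder
 * NOTEXIST <-> NONE <-> AUTH <-> ASSOC <-> AUTHORIZED, so the
 * teardown below issues one (*sta_state) call per step rather than
 * jumping straight to NOTEXIST.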
*/ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni)); KASSERT(lsta->state == IEEE80211_STA_AUTHORIZED, ("%s: lsta %p state not " "AUTHORIZED: %#x\n", __func__, lsta, lsta->state)); error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_ASSOC); if (error != 0) { ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(ASSOC) " "failed: %d\n", __func__, __LINE__, error); goto out; } lkpi_lsta_dump(lsta, ni, __func__, __LINE__); #ifdef LKPI_80211_HW_CRYPTO if (lkpi_hwcrypto) { error = lkpi_sta_del_keys(hw, vif, lsta); if (error != 0) { ic_printf(vap->iv_ic, "%s:%d: lkpi_sta_del_keys " "failed: %d\n", __func__, __LINE__, error); /* * Either drv/fw will crash or clean up itself, * otherwise net80211 will delete the keys (at a * less appropriate time). */ /* goto out; */ } } #endif /* Update sta_state (ASSOC to AUTH). */ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni)); KASSERT(lsta->state == IEEE80211_STA_ASSOC, ("%s: lsta %p state not " "ASSOC: %#x\n", __func__, lsta, lsta->state)); error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_AUTH); if (error != 0) { ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(AUTH) " "failed: %d\n", __func__, __LINE__, error); goto out; } lkpi_lsta_dump(lsta, ni, __func__, __LINE__); /* Update sta and change state (from AUTH) to NONE. */ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni)); KASSERT(lsta->state == IEEE80211_STA_AUTH, ("%s: lsta %p state not " "AUTH: %#x\n", __func__, lsta, lsta->state)); error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NONE); if (error != 0) { ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NONE) " "failed: %d\n", __func__, __LINE__, error); goto out; } bss_changed = 0; /* * Start updating bss info (bss_info_changed) (assoc, aid, ..). * * One would expect this to happen when going off AUTHORIZED. * See comment there; removes the sta from fw if not careful * (bss_info_changed() change is executed right away). * * We need to do this now, before sta changes to IEEE80211_STA_NOTEXIST * as otherwise drivers (iwlwifi at least) will silently not remove * the sta from the firmware and when we add a new one will trigger * a fw assert. * * The order which works best so far avoiding early removal or silent * non-removal seems to be (for iwlwifi::mld-mac80211.c cases; * the iwlwifi:mac80211.c case still to be tested): * 1) lkpi_disassoc(): set vif->cfg.assoc = false (aid=0 side effect here) * 2) call the last sta_state update -> IEEE80211_STA_NOTEXIST * (removes the sta given assoc is false) * 3) add the remaining BSS_CHANGED changes and call bss_info_changed() * 4) call unassign_vif_chanctx * 5) call lkpi_hw_conf_idle * 6) call remove_chanctx */ bss_changed |= lkpi_disassoc(sta, vif, lhw); lkpi_lsta_dump(lsta, ni, __func__, __LINE__); /* Adjust sta and change state (from NONE) to NOTEXIST. */ KASSERT(lsta != NULL, ("%s: ni %p lsta is NULL\n", __func__, ni)); KASSERT(lsta->state == IEEE80211_STA_NONE, ("%s: lsta %p state not " "NONE: %#x, nstate %d arg %d\n", __func__, lsta, lsta->state, nstate, arg)); error = lkpi_80211_mo_sta_state(hw, vif, lsta, IEEE80211_STA_NOTEXIST); if (error != 0) { IMPROVE("do we need to undo the chan ctx?"); ic_printf(vap->iv_ic, "%s:%d: mo_sta_state(NOTEXIST) " "failed: %d\n", __func__, __LINE__, error); goto out; } lkpi_lsta_remove(lsta, lvif); lkpi_lsta_dump(lsta, ni, __func__, __LINE__); /* sta no longer safe to use.
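 * From here on only our local lsta shadow remains; the driver/firmware
 * entry is gone after the NOTEXIST transition above.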
*/ IMPROVE("Any bss_info changes to announce?"); vif->bss_conf.qos = 0; bss_changed |= BSS_CHANGED_QOS; vif->cfg.ssid_len = 0; memset(vif->cfg.ssid, '\0', sizeof(vif->cfg.ssid)); bss_changed |= BSS_CHANGED_BSSID; vif->bss_conf.use_short_preamble = false; vif->bss_conf.qos = false; /* XXX BSS_CHANGED_???? */ lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, bss_changed); LKPI_80211_LVIF_LOCK(lvif); /* Remove ni reference for this cache of lsta. */ lvif->lvif_bss = NULL; lvif->lvif_bss_synched = false; LKPI_80211_LVIF_UNLOCK(lvif); /* * The very last release the reference on the ni for the ni/lsta on * lvif->lvif_bss. Upon return from this both ni and lsta are invalid * and potentially freed. */ ieee80211_free_node(ni); /* conf_tx */ lkpi_remove_chanctx(hw, vif); error = EALREADY; out: wiphy_unlock(hw->wiphy); IEEE80211_LOCK(vap->iv_ic); outni: return (error); } static int lkpi_sta_run_to_scan(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { return (lkpi_sta_run_to_init(vap, nstate, arg)); } static int lkpi_sta_run_to_auth(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { int error; error = lkpi_sta_run_to_init(vap, nstate, arg); if (error != 0 && error != EALREADY) return (error); /* At this point iv_bss is long a new node! */ error |= lkpi_sta_scan_to_auth(vap, nstate, 0); return (error); } /* -------------------------------------------------------------------------- */ /* * The matches the documented state changes in net80211::sta_newstate(). * XXX (1) without CSA and SLEEP yet, * XXX (2) not all unhandled cases * there are "invalid" (so there is a room for failure here). */ struct fsm_state { /* INIT, SCAN, AUTH, ASSOC, CAC, RUN, CSA, SLEEP */ enum ieee80211_state ostate; enum ieee80211_state nstate; int (*handler)(struct ieee80211vap *, enum ieee80211_state, int); } sta_state_fsm[] = { { IEEE80211_S_INIT, IEEE80211_S_INIT, lkpi_sta_state_do_nada }, { IEEE80211_S_SCAN, IEEE80211_S_INIT, lkpi_sta_state_do_nada }, /* scan_to_init */ { IEEE80211_S_AUTH, IEEE80211_S_INIT, lkpi_sta_auth_to_init }, /* not explicitly in sta_newstate() */ { IEEE80211_S_ASSOC, IEEE80211_S_INIT, lkpi_sta_assoc_to_init }, /* Send DEAUTH. */ { IEEE80211_S_RUN, IEEE80211_S_INIT, lkpi_sta_run_to_init }, /* Send DISASSOC. */ { IEEE80211_S_INIT, IEEE80211_S_SCAN, lkpi_sta_state_do_nada }, { IEEE80211_S_SCAN, IEEE80211_S_SCAN, lkpi_sta_state_do_nada }, { IEEE80211_S_AUTH, IEEE80211_S_SCAN, lkpi_sta_auth_to_scan }, { IEEE80211_S_ASSOC, IEEE80211_S_SCAN, lkpi_sta_assoc_to_scan }, { IEEE80211_S_RUN, IEEE80211_S_SCAN, lkpi_sta_run_to_scan }, /* Beacon miss. */ { IEEE80211_S_INIT, IEEE80211_S_AUTH, lkpi_sta_scan_to_auth }, /* Send AUTH. */ { IEEE80211_S_SCAN, IEEE80211_S_AUTH, lkpi_sta_scan_to_auth }, /* Send AUTH. */ { IEEE80211_S_AUTH, IEEE80211_S_AUTH, lkpi_sta_a_to_a }, /* Send ?AUTH. */ { IEEE80211_S_ASSOC, IEEE80211_S_AUTH, lkpi_sta_assoc_to_auth }, /* Send ?AUTH. */ { IEEE80211_S_RUN, IEEE80211_S_AUTH, lkpi_sta_run_to_auth }, /* Send ?AUTH. */ { IEEE80211_S_AUTH, IEEE80211_S_ASSOC, lkpi_sta_auth_to_assoc }, /* Send ASSOCREQ. */ { IEEE80211_S_ASSOC, IEEE80211_S_ASSOC, lkpi_sta_a_to_a }, /* Send ASSOCREQ. */ { IEEE80211_S_RUN, IEEE80211_S_ASSOC, lkpi_sta_run_to_assoc }, /* Send ASSOCREQ/REASSOCREQ. */ { IEEE80211_S_AUTH, IEEE80211_S_RUN, lkpi_sta_auth_to_run }, { IEEE80211_S_ASSOC, IEEE80211_S_RUN, lkpi_sta_assoc_to_run }, { IEEE80211_S_RUN, IEEE80211_S_RUN, lkpi_sta_state_do_nada }, /* Dummy at the end without handler. 
*/ { IEEE80211_S_INIT, IEEE80211_S_INIT, NULL }, }; static int lkpi_iv_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic; struct lkpi_hw *lhw; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct fsm_state *s; enum ieee80211_state ostate; int error; ic = vap->iv_ic; IEEE80211_LOCK_ASSERT(ic); ostate = vap->iv_state; #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE) ic_printf(vap->iv_ic, "%s:%d: vap %p nstate %#x arg %#x\n", __func__, __LINE__, vap, nstate, arg); #endif if (vap->iv_opmode == IEEE80211_M_STA) { lhw = ic->ic_softc; lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); /* No need to replicate this in most state handlers. */ if (ostate == IEEE80211_S_SCAN && nstate != IEEE80211_S_SCAN) lkpi_stop_hw_scan(lhw, vif); s = sta_state_fsm; } else { ic_printf(vap->iv_ic, "%s: only station mode currently supported: " "cap %p iv_opmode %d\n", __func__, vap, vap->iv_opmode); return (ENOSYS); } error = 0; for (; s->handler != NULL; s++) { if (ostate == s->ostate && nstate == s->nstate) { #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE) ic_printf(vap->iv_ic, "%s: new state %d (%s) ->" " %d (%s): arg %d.\n", __func__, ostate, ieee80211_state_name[ostate], nstate, ieee80211_state_name[nstate], arg); #endif error = s->handler(vap, nstate, arg); break; } } IEEE80211_LOCK_ASSERT(vap->iv_ic); if (s->handler == NULL) { IMPROVE("turn this into a KASSERT\n"); ic_printf(vap->iv_ic, "%s: unsupported state transition " "%d (%s) -> %d (%s)\n", __func__, ostate, ieee80211_state_name[ostate], nstate, ieee80211_state_name[nstate]); return (ENOSYS); } if (error == EALREADY) { #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE) ic_printf(vap->iv_ic, "%s: state transition %d (%s) -> " "%d (%s): iv_newstate already handled: %d.\n", __func__, ostate, ieee80211_state_name[ostate], nstate, ieee80211_state_name[nstate], error); #endif return (0); } if (error != 0) { ic_printf(vap->iv_ic, "%s: error %d during state transition " "%d (%s) -> %d (%s)\n", __func__, error, ostate, ieee80211_state_name[ostate], nstate, ieee80211_state_name[nstate]); return (error); } #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE) ic_printf(vap->iv_ic, "%s:%d: vap %p nstate %#x arg %#x " "calling net80211 parent\n", __func__, __LINE__, vap, nstate, arg); #endif return (lvif->iv_newstate(vap, nstate, arg)); } /* -------------------------------------------------------------------------- */ /* * We overload (*iv_update_bss) as otherwise we have cases in, e.g., * net80211::ieee80211_sta_join1() where vap->iv_bss gets replaced by a * new node without us knowing and thus our ni/lsta are out of sync. 
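 *
 * The interposed method below therefore only marks the cached lvif_bss
 * as unsynched and hands through to the saved net80211 implementation;
 * the state machine re-synchs the cache later (e.g., in
 * lkpi_sta_scan_to_auth()).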
*/ static struct ieee80211_node * lkpi_iv_update_bss(struct ieee80211vap *vap, struct ieee80211_node *ni) { struct lkpi_vif *lvif; struct ieee80211_node *rni; IEEE80211_LOCK_ASSERT(vap->iv_ic); lvif = VAP_TO_LVIF(vap); LKPI_80211_LVIF_LOCK(lvif); lvif->lvif_bss_synched = false; LKPI_80211_LVIF_UNLOCK(lvif); rni = lvif->iv_update_bss(vap, ni); return (rni); } #ifdef LKPI_80211_WME static int lkpi_wme_update(struct lkpi_hw *lhw, struct ieee80211vap *vap, bool planned) { struct ieee80211com *ic; struct ieee80211_hw *hw; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct chanAccParams chp; struct wmeParams wmeparr[WME_NUM_AC]; struct ieee80211_tx_queue_params txqp; enum ieee80211_bss_changed changed; int error; uint16_t ac; hw = LHW_TO_HW(lhw); lockdep_assert_wiphy(hw->wiphy); IMPROVE(); KASSERT(WME_NUM_AC == IEEE80211_NUM_ACS, ("%s: WME_NUM_AC %d != " "IEEE80211_NUM_ACS %d\n", __func__, WME_NUM_AC, IEEE80211_NUM_ACS)); if (vap == NULL) return (0); if ((vap->iv_flags & IEEE80211_F_WME) == 0) return (0); if (lhw->ops->conf_tx == NULL) return (0); if (!planned && (vap->iv_state != IEEE80211_S_RUN)) { lhw->update_wme = true; return (0); } lhw->update_wme = false; ic = lhw->ic; ieee80211_wme_ic_getparams(ic, &chp); IEEE80211_LOCK(ic); for (ac = 0; ac < WME_NUM_AC; ac++) wmeparr[ac] = chp.cap_wmeParams[ac]; IEEE80211_UNLOCK(ic); lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); /* Configure tx queues (conf_tx) & send BSS_CHANGED_QOS. */ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { struct wmeParams *wmep; wmep = &wmeparr[ac]; bzero(&txqp, sizeof(txqp)); txqp.cw_min = wmep->wmep_logcwmin; txqp.cw_max = wmep->wmep_logcwmax; txqp.txop = wmep->wmep_txopLimit; txqp.aifs = wmep->wmep_aifsn; error = lkpi_80211_mo_conf_tx(hw, vif, /* link_id */0, ac, &txqp); if (error != 0) ic_printf(ic, "%s: conf_tx ac %u failed %d\n", __func__, ac, error); } changed = BSS_CHANGED_QOS; if (!planned) lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, changed); return (changed); } #endif static int lkpi_ic_wme_update(struct ieee80211com *ic) { #ifdef LKPI_80211_WME struct ieee80211vap *vap; struct lkpi_hw *lhw; struct ieee80211_hw *hw; IMPROVE("Use the per-VAP callback in net80211."); vap = TAILQ_FIRST(&ic->ic_vaps); if (vap == NULL) return (0); lhw = ic->ic_softc; hw = LHW_TO_HW(lhw); wiphy_lock(hw->wiphy); lkpi_wme_update(lhw, vap, false); wiphy_unlock(hw->wiphy); #endif return (0); /* unused */ } /* * Change link-layer address on the vif (if the vap is not started/"UP"). * This can happen if a user changes 'ether' using ifconfig. * The code is based on net80211/ieee80211_freebsd.c::wlan_iflladdr() but * we do use a per-[l]vif event handler to be sure we exist as we * cannot assume that from every vap derives a vif and we have a hard * time checking based on net80211 information. * Should this ever become a real problem we could add a callback function * to wlan_iflladdr() to be set optionally but that would be for a * single-consumer (or needs a list) -- was just too complicated for an * otherwise perfect mechanism FreeBSD already provides. */ static void lkpi_vif_iflladdr(void *arg, struct ifnet *ifp) { struct epoch_tracker et; struct ieee80211_vif *vif; NET_EPOCH_ENTER(et); /* NB: identify vap's by if_transmit; left as an extra check. 
*/ if (if_gettransmitfn(ifp) != ieee80211_vap_transmit || (if_getflags(ifp) & IFF_UP) != 0) { NET_EPOCH_EXIT(et); return; } vif = arg; IEEE80211_ADDR_COPY(vif->bss_conf.addr, if_getlladdr(ifp)); NET_EPOCH_EXIT(et); } static struct ieee80211vap * lkpi_ic_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct lkpi_vif *lvif; struct ieee80211vap *vap; struct ieee80211_vif *vif; struct ieee80211_tx_queue_params txqp; enum ieee80211_bss_changed changed; struct sysctl_oid *node; size_t len; int error, i; uint16_t ac; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* 1 so far. Add once this works. */ return (NULL); lhw = ic->ic_softc; hw = LHW_TO_HW(lhw); len = sizeof(*lvif); len += hw->vif_data_size; /* vif->drv_priv */ lvif = malloc(len, M_80211_VAP, M_WAITOK | M_ZERO); mtx_init(&lvif->mtx, "lvif", NULL, MTX_DEF); INIT_LIST_HEAD(&lvif->lsta_list); lvif->lvif_bss = NULL; refcount_init(&lvif->nt_unlocked, 0); lvif->lvif_bss_synched = false; vap = LVIF_TO_VAP(lvif); vif = LVIF_TO_VIF(lvif); memcpy(vif->addr, mac, IEEE80211_ADDR_LEN); vif->p2p = false; vif->probe_req_reg = false; vif->type = lkpi_opmode_to_vif_type(opmode); lvif->wdev.iftype = vif->type; /* Need to fill in other fields as well. */ IMPROVE(); /* XXX-BZ hardcoded for now! */ #if 1 RCU_INIT_POINTER(vif->bss_conf.chanctx_conf, NULL); vif->bss_conf.vif = vif; /* vap->iv_myaddr is not set until net80211::vap_setup or vap_attach. */ IEEE80211_ADDR_COPY(vif->bss_conf.addr, mac); lvif->lvif_ifllevent = EVENTHANDLER_REGISTER(iflladdr_event, lkpi_vif_iflladdr, vif, EVENTHANDLER_PRI_ANY); vif->bss_conf.link_id = 0; /* Non-MLO operation. */ vif->bss_conf.chanreq.oper.width = NL80211_CHAN_WIDTH_20_NOHT; vif->bss_conf.use_short_preamble = false; /* vap->iv_flags IEEE80211_F_SHPREAMBLE */ vif->bss_conf.use_short_slot = false; /* vap->iv_flags IEEE80211_F_SHSLOT */ vif->bss_conf.qos = false; vif->bss_conf.use_cts_prot = false; /* vap->iv_protmode */ vif->bss_conf.ht_operation_mode = IEEE80211_HT_OP_MODE_PROTECTION_NONE; vif->cfg.aid = 0; vif->cfg.assoc = false; vif->cfg.idle = true; vif->cfg.ps = false; IMPROVE("Check other fields and then figure out whats is left elsewhere of them"); /* * We need to initialize it to something as the bss_info_changed call * will try to copy from it in iwlwifi and NULL is a panic. * We will set the proper one in scan_to_auth() before being assoc. */ vif->bss_conf.bssid = ieee80211broadcastaddr; #endif #if 0 vif->bss_conf.dtim_period = 0; /* IEEE80211_DTIM_DEFAULT ; must stay 0. */ IEEE80211_ADDR_COPY(vif->bss_conf.bssid, bssid); vif->bss_conf.beacon_int = ic->ic_bintval; /* iwlwifi bug. */ if (vif->bss_conf.beacon_int < 16) vif->bss_conf.beacon_int = 16; #endif /* Link Config */ vif->link_conf[0] = &vif->bss_conf; for (i = 0; i < nitems(vif->link_conf); i++) { IMPROVE("more than 1 link one day"); } /* Setup queue defaults; driver may override in (*add_interface). */ for (i = 0; i < IEEE80211_NUM_ACS; i++) { if (ieee80211_hw_check(hw, QUEUE_CONTROL)) vif->hw_queue[i] = IEEE80211_INVAL_HW_QUEUE; else if (hw->queues >= IEEE80211_NUM_ACS) vif->hw_queue[i] = i; else vif->hw_queue[i] = 0; /* Initialize the queue to running. Stopped? 
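 * Three cases above: with QUEUE_CONTROL the driver assigns queues in
 * (*add_interface) so we start out invalid; with enough hardware
 * queues we map one queue per AC; otherwise everything funnels into
 * queue 0.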
*/ lvif->hw_queue_stopped[i] = false; } vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; IMPROVE(); error = lkpi_80211_mo_start(hw); if (error != 0) { ic_printf(ic, "%s: failed to start hw: %d\n", __func__, error); mtx_destroy(&lvif->mtx); free(lvif, M_80211_VAP); return (NULL); } error = lkpi_80211_mo_add_interface(hw, vif); if (error != 0) { IMPROVE(); /* XXX-BZ mo_stop()? */ ic_printf(ic, "%s: failed to add interface: %d\n", __func__, error); mtx_destroy(&lvif->mtx); free(lvif, M_80211_VAP); return (NULL); } LKPI_80211_LHW_LVIF_LOCK(lhw); TAILQ_INSERT_TAIL(&lhw->lvif_head, lvif, lvif_entry); LKPI_80211_LHW_LVIF_UNLOCK(lhw); /* Set bss_info. */ changed = 0; lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, changed); /* Configure tx queues (conf_tx), default WME & send BSS_CHANGED_QOS. */ IMPROVE("Hardcoded values; to fix see 802.11-2016, 9.4.2.29 EDCA Parameter Set element"); wiphy_lock(hw->wiphy); for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { bzero(&txqp, sizeof(txqp)); txqp.cw_min = 15; txqp.cw_max = 1023; txqp.txop = 0; txqp.aifs = 2; error = lkpi_80211_mo_conf_tx(hw, vif, /* link_id */0, ac, &txqp); if (error != 0) ic_printf(ic, "%s: conf_tx ac %u failed %d\n", __func__, ac, error); } wiphy_unlock(hw->wiphy); changed = BSS_CHANGED_QOS; lkpi_80211_mo_bss_info_changed(hw, vif, &vif->bss_conf, changed); /* Force MC init. */ lkpi_update_mcast_filter(ic, true); IMPROVE(); ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* Override with LinuxKPI method so we can drive mac80211/cfg80211. */ lvif->iv_newstate = vap->iv_newstate; vap->iv_newstate = lkpi_iv_newstate; lvif->iv_update_bss = vap->iv_update_bss; vap->iv_update_bss = lkpi_iv_update_bss; #ifdef LKPI_80211_HW_CRYPTO /* Key management. */ if (lkpi_hwcrypto && lhw->ops->set_key != NULL) { vap->iv_key_set = lkpi_iv_key_set; vap->iv_key_delete = lkpi_iv_key_delete; vap->iv_key_update_begin = lkpi_iv_key_update_begin; vap->iv_key_update_end = lkpi_iv_key_update_end; } #endif #ifdef LKPI_80211_HT /* Stay with the iv_ampdu_rxmax,limit / iv_ampdu_density defaults until later. */ #endif ieee80211_ratectl_init(vap); /* Complete setup. */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); #ifdef LKPI_80211_HT /* * Modern chipset/fw/drv will do A-MPDU in drv/fw and fail * to do so if they cannot do the crypto too. */ if (!lkpi_hwcrypto && ieee80211_hw_check(hw, AMPDU_AGGREGATION)) vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_RX; #endif #if defined(LKPI_80211_HT) /* 20250125-BZ Keep A-MPDU TX cleared until we sorted out AddBA for all drivers. */ vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_TX; #endif if (hw->max_listen_interval == 0) hw->max_listen_interval = 7 * (ic->ic_lintval / ic->ic_bintval); hw->conf.listen_interval = hw->max_listen_interval; ic->ic_set_channel(ic); /* XXX-BZ do we need to be able to update these? */ hw->wiphy->frag_threshold = vap->iv_fragthreshold; lkpi_80211_mo_set_frag_threshold(hw, vap->iv_fragthreshold); hw->wiphy->rts_threshold = vap->iv_rtsthreshold; lkpi_80211_mo_set_rts_threshold(hw, vap->iv_rtsthreshold); /* any others? */ /* Add per-VIF/VAP sysctls. 
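 * These should show up under compat.linuxkpi.80211.<ifname>; e.g.
 * "dump_stas" below prints the per-sta statistics of this vif.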
*/ sysctl_ctx_init(&lvif->sysctl_ctx); node = SYSCTL_ADD_NODE(&lvif->sysctl_ctx, SYSCTL_CHILDREN(&sysctl___compat_linuxkpi_80211), OID_AUTO, if_name(vap->iv_ifp), CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, "VIF Information"); SYSCTL_ADD_PROC(&lvif->sysctl_ctx, SYSCTL_CHILDREN(node), OID_AUTO, "dump_stas", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, lvif, 0, lkpi_80211_dump_stas, "A", "Dump sta statistics of this vif"); IMPROVE(); return (vap); } void linuxkpi_ieee80211_unregister_hw(struct ieee80211_hw *hw) { wiphy_unregister(hw->wiphy); linuxkpi_ieee80211_ifdetach(hw); IMPROVE(); } void linuxkpi_ieee80211_restart_hw(struct ieee80211_hw *hw) { TODO(); } static void lkpi_ic_vap_delete(struct ieee80211vap *vap) { struct ieee80211com *ic; struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct lkpi_vif *lvif; struct ieee80211_vif *vif; lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); ic = vap->iv_ic; lhw = ic->ic_softc; hw = LHW_TO_HW(lhw); EVENTHANDLER_DEREGISTER(iflladdr_event, lvif->lvif_ifllevent); /* Clear up per-VIF/VAP sysctls. */ sysctl_ctx_free(&lvif->sysctl_ctx); LKPI_80211_LHW_LVIF_LOCK(lhw); TAILQ_REMOVE(&lhw->lvif_head, lvif, lvif_entry); LKPI_80211_LHW_LVIF_UNLOCK(lhw); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); IMPROVE("clear up other bits in this state"); lkpi_80211_mo_remove_interface(hw, vif); /* Single VAP, so we can do this here. */ lkpi_80211_mo_stop(hw, false); /* XXX SUSPEND */ mtx_destroy(&lvif->mtx); free(lvif, M_80211_VAP); } static void lkpi_ic_update_mcast(struct ieee80211com *ic) { lkpi_update_mcast_filter(ic, false); TRACEOK(); } static void lkpi_ic_update_promisc(struct ieee80211com *ic) { UNIMPLEMENTED; } static void lkpi_ic_update_chw(struct ieee80211com *ic) { UNIMPLEMENTED; } /* Start / stop device. */ static void lkpi_ic_parent(struct ieee80211com *ic) { struct lkpi_hw *lhw; struct ieee80211_hw *hw; #ifdef HW_START_STOP int error; #endif bool start_all; IMPROVE(); lhw = ic->ic_softc; hw = LHW_TO_HW(lhw); start_all = false; /* IEEE80211_UNLOCK(ic); */ wiphy_lock(hw->wiphy); if (ic->ic_nrunning > 0) { #ifdef HW_START_STOP error = lkpi_80211_mo_start(hw); if (error == 0) #endif start_all = true; } else { #ifdef HW_START_STOP lkpi_80211_mo_stop(hw, false); /* XXX SUSPEND */ #endif } wiphy_unlock(hw->wiphy); /* IEEE80211_LOCK(ic); */ if (start_all) ieee80211_start_all(ic); } bool linuxkpi_ieee80211_is_ie_id_in_ie_buf(const u8 ie, const u8 *ie_ids, size_t ie_ids_len) { int i; for (i = 0; i < ie_ids_len; i++) { if (ie == *ie_ids) return (true); } return (false); } /* Return true if skipped; false if error. */ bool linuxkpi_ieee80211_ie_advance(size_t *xp, const u8 *ies, size_t ies_len) { size_t x; uint8_t l; x = *xp; KASSERT(x < ies_len, ("%s: x %zu ies_len %zu ies %p\n", __func__, x, ies_len, ies)); l = ies[x + 1]; x += 2 + l; if (x > ies_len) return (false); *xp = x; return (true); } static uint8_t * lkpi_scan_ies_add(uint8_t *p, struct ieee80211_scan_ies *scan_ies, uint32_t band_mask, struct ieee80211vap *vap, struct ieee80211_hw *hw) { struct ieee80211_supported_band *supband; struct linuxkpi_ieee80211_channel *channels; struct ieee80211com *ic; const struct ieee80211_channel *chan; const struct ieee80211_rateset *rs; uint8_t *pb; int band, i; ic = vap->iv_ic; for (band = 0; band < NUM_NL80211_BANDS; band++) { if ((band_mask & (1 << band)) == 0) continue; supband = hw->wiphy->bands[band]; /* * This should not happen; * band_mask is a bitmask of valid bands to scan on. 
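 * For example (illustrative only):
 */
#if 0
	band_mask = (1 << NL80211_BAND_2GHZ) | (1 << NL80211_BAND_5GHZ);
	if ((band_mask & (1 << band)) == 0)
		continue;	/* Band not requested for this scan. */
#endif
/*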
*/ if (supband == NULL || supband->n_channels == 0) continue; /* Find a first channel to get the mode and rates from. */ channels = supband->channels; chan = NULL; for (i = 0; i < supband->n_channels; i++) { if (channels[i].flags & IEEE80211_CHAN_DISABLED) continue; chan = ieee80211_find_channel(ic, channels[i].center_freq, 0); if (chan != NULL) break; } /* This really should not happen. */ if (chan == NULL) continue; pb = p; rs = ieee80211_get_suprates(ic, chan); /* calls chan2mode */ p = ieee80211_add_rates(p, rs); p = ieee80211_add_xrates(p, rs); #if defined(LKPI_80211_HT) if ((vap->iv_flags_ht & IEEE80211_FHT_HT) != 0) { struct ieee80211_channel *c; c = ieee80211_ht_adjust_channel(ic, ic->ic_curchan, vap->iv_flags_ht); p = ieee80211_add_htcap_ch(p, vap, c); } #endif #if defined(LKPI_80211_VHT) if (band == NL80211_BAND_5GHZ && (vap->iv_vht_flags & IEEE80211_FVHT_VHT) != 0) { struct ieee80211_channel *c; c = ieee80211_ht_adjust_channel(ic, ic->ic_curchan, vap->iv_flags_ht); c = ieee80211_vht_adjust_channel(ic, c, vap->iv_vht_flags); p = ieee80211_add_vhtcap_ch(p, vap, c); } #endif scan_ies->ies[band] = pb; scan_ies->len[band] = p - pb; } /* Add common_ies */ pb = p; if ((vap->iv_flags & IEEE80211_F_WPA1) != 0 && vap->iv_wpa_ie != NULL) { memcpy(p, vap->iv_wpa_ie, 2 + vap->iv_wpa_ie[1]); p += 2 + vap->iv_wpa_ie[1]; } if (vap->iv_appie_probereq != NULL) { memcpy(p, vap->iv_appie_probereq->ie_data, vap->iv_appie_probereq->ie_len); p += vap->iv_appie_probereq->ie_len; } scan_ies->common_ies = pb; scan_ies->common_ie_len = p - pb; return (p); } static void lkpi_ic_scan_start(struct ieee80211com *ic) { struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct ieee80211_scan_state *ss; struct ieee80211vap *vap; int error; bool is_hw_scan; lhw = ic->ic_softc; LKPI_80211_LHW_SCAN_LOCK(lhw); if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0) { /* A scan is still running. */ LKPI_80211_LHW_SCAN_UNLOCK(lhw); return; } is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0; LKPI_80211_LHW_SCAN_UNLOCK(lhw); ss = ic->ic_scan; vap = ss->ss_vap; if (vap->iv_state != IEEE80211_S_SCAN) { IMPROVE("We need to be able to scan if not in S_SCAN"); return; } hw = LHW_TO_HW(lhw); if (!is_hw_scan) { /* If hw_scan is cleared clear FEXT_SCAN_OFFLOAD too. */ vap->iv_flags_ext &= ~IEEE80211_FEXT_SCAN_OFFLOAD; sw_scan: lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); if (vap->iv_state == IEEE80211_S_SCAN) lkpi_hw_conf_idle(hw, false); lkpi_80211_mo_sw_scan_start(hw, vif, vif->addr); /* net80211::scan_start() handled PS for us. */ IMPROVE(); /* XXX Also means it is too late to flush queues? * need to check iv_sta_ps or overload? */ /* XXX want to adjust ss end time/ maxdwell? */ } else { struct ieee80211_scan_request *hw_req; struct linuxkpi_ieee80211_channel *lc, **cpp; struct cfg80211_ssid *ssids; struct cfg80211_scan_6ghz_params *s6gp; size_t chan_len, nchan, ssids_len, s6ghzlen; int band, i, ssid_count, common_ie_len; uint32_t band_mask; uint8_t *ie, *ieend; bool running; ssid_count = min(ss->ss_nssid, hw->wiphy->max_scan_ssids); ssids_len = ssid_count * sizeof(*ssids); s6ghzlen = 0 * (sizeof(*s6gp)); /* XXX-BZ */ band_mask = 0; nchan = 0; if (ieee80211_hw_check(hw, SINGLE_SCAN_ON_ALL_BANDS)) { #if 0 /* Avoid net80211 scan lists until it has proper scan offload support. 
*/ for (i = ss->ss_next; i < ss->ss_last; i++) { nchan++; band = lkpi_net80211_chan_to_nl80211_band( ss->ss_chans[ss->ss_next + i]); band_mask |= (1 << band); } #else /* Instead we scan for all channels all the time. */ for (band = 0; band < NUM_NL80211_BANDS; band++) { switch (band) { case NL80211_BAND_2GHZ: case NL80211_BAND_5GHZ: break; default: continue; } if (hw->wiphy->bands[band] != NULL) { nchan += hw->wiphy->bands[band]->n_channels; band_mask |= (1 << band); } } #endif } else { IMPROVE("individual band scans not yet supported, only scanning first band"); /* In theory net80211 should drive this. */ /* Probably we need to add local logic for now; * need to deal with scan_complete * and cancel_scan and keep local state. * Also cut the nchan down above. */ /* XXX-BZ ath10k does not set this but still does it? &$%^ */ } chan_len = nchan * (sizeof(lc) + sizeof(*lc)); common_ie_len = 0; if ((vap->iv_flags & IEEE80211_F_WPA1) != 0 && vap->iv_wpa_ie != NULL) common_ie_len += vap->iv_wpa_ie[1]; if (vap->iv_appie_probereq != NULL) common_ie_len += vap->iv_appie_probereq->ie_len; /* We would love to check this at an earlier stage... */ if (common_ie_len > hw->wiphy->max_scan_ie_len) { ic_printf(ic, "WARNING: %s: common_ie_len %d > " "wiphy->max_scan_ie_len %d\n", __func__, common_ie_len, hw->wiphy->max_scan_ie_len); } hw_req = malloc(sizeof(*hw_req) + ssids_len + s6ghzlen + chan_len + lhw->supbands * lhw->scan_ie_len + common_ie_len, M_LKPI80211, M_WAITOK | M_ZERO); hw_req->req.flags = 0; /* XXX ??? */ /* hw_req->req.wdev */ hw_req->req.wiphy = hw->wiphy; hw_req->req.no_cck = false; /* XXX */ #if 0 /* This seems to pessimise default scanning behaviour. */ hw_req->req.duration_mandatory = TICKS_2_USEC(ss->ss_mindwell); hw_req->req.duration = TICKS_2_USEC(ss->ss_maxdwell); #endif #ifdef __notyet__ hw_req->req.flags |= NL80211_SCAN_FLAG_RANDOM_ADDR; memcpy(hw_req->req.mac_addr, xxx, IEEE80211_ADDR_LEN); memset(hw_req->req.mac_addr_mask, 0xxx, IEEE80211_ADDR_LEN); #endif eth_broadcast_addr(hw_req->req.bssid); hw_req->req.n_channels = nchan; cpp = (struct linuxkpi_ieee80211_channel **)(hw_req + 1); lc = (struct linuxkpi_ieee80211_channel *)(cpp + nchan); for (i = 0; i < nchan; i++) { *(cpp + i) = (struct linuxkpi_ieee80211_channel *)(lc + i); } #if 0 /* Avoid net80211 scan lists until it has proper scan offload support. */ for (i = 0; i < nchan; i++) { struct ieee80211_channel *c; c = ss->ss_chans[ss->ss_next + i]; lc->hw_value = c->ic_ieee; lc->center_freq = c->ic_freq; /* XXX */ /* lc->flags */ lc->band = lkpi_net80211_chan_to_nl80211_band(c); lc->max_power = c->ic_maxpower; /* lc-> ... */ lc++; } #else for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *supband; struct linuxkpi_ieee80211_channel *channels; /* Band disabled for scanning? */ if ((band_mask & (1 << band)) == 0) continue; /* Nothing to scan in band? */ supband = hw->wiphy->bands[band]; if (supband == NULL || supband->n_channels == 0) continue; channels = supband->channels; for (i = 0; i < supband->n_channels; i++) { *lc = channels[i]; lc++; } } #endif hw_req->req.n_ssids = ssid_count; if (hw_req->req.n_ssids > 0) { ssids = (struct cfg80211_ssid *)lc; hw_req->req.ssids = ssids; for (i = 0; i < ssid_count; i++) { ssids->ssid_len = ss->ss_ssid[i].len; memcpy(ssids->ssid, ss->ss_ssid[i].ssid, ss->ss_ssid[i].len); ssids++; } s6gp = (struct cfg80211_scan_6ghz_params *)ssids; } else { s6gp = (struct cfg80211_scan_6ghz_params *)lc; } /* 6GHz one day. 
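 * The single allocation above is carved up in order: hw_req, the
 * channel pointer array (cpp), the channels themselves (lc), the
 * SSIDs, the (so far unused) 6 GHz params, and finally the per-band
 * and common IEs filled in right below.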
*/ hw_req->req.n_6ghz_params = 0; hw_req->req.scan_6ghz_params = NULL; hw_req->req.scan_6ghz = false; /* Weird boolean; not what you think. */ /* s6gp->... */ ie = ieend = (uint8_t *)s6gp; /* Copy per-band IEs, copy common IEs */ ieend = lkpi_scan_ies_add(ie, &hw_req->ies, band_mask, vap, hw); hw_req->req.ie = ie; hw_req->req.ie_len = ieend - ie; lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); LKPI_80211_LHW_SCAN_LOCK(lhw); /* Re-check under lock. */ running = (lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0; if (!running) { KASSERT(lhw->hw_req == NULL, ("%s: ic %p lhw %p hw_req %p " "!= NULL\n", __func__, ic, lhw, lhw->hw_req)); lhw->scan_flags |= LKPI_LHW_SCAN_RUNNING; lhw->hw_req = hw_req; } LKPI_80211_LHW_SCAN_UNLOCK(lhw); if (running) { free(hw_req, M_LKPI80211); return; } error = lkpi_80211_mo_hw_scan(hw, vif, hw_req); if (error != 0) { ieee80211_cancel_scan(vap); /* * ieee80211_scan_completed must be called in either * case of error or none. So let the free happen there * and only there. * That would be fine in theory but in practice drivers * behave differently: * ath10k does not return hw_scan until after scan_complete * and can then still return an error. * rtw88 can return 1 or -EBUSY without scan_complete * iwlwifi can return various errors before scan starts * ... * So we cannot rely on that behaviour and have to check * and balance between both code paths. */ LKPI_80211_LHW_SCAN_LOCK(lhw); if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) != 0) { free(lhw->hw_req, M_LKPI80211); lhw->hw_req = NULL; lhw->scan_flags &= ~LKPI_LHW_SCAN_RUNNING; } LKPI_80211_LHW_SCAN_UNLOCK(lhw); /* * XXX-SIGH magic number. * rtw88 has a magic "return 1" if offloading scan is * not possible. Fall back to sw scan in that case. */ if (error == 1) { LKPI_80211_LHW_SCAN_LOCK(lhw); lhw->scan_flags &= ~LKPI_LHW_SCAN_HW; LKPI_80211_LHW_SCAN_UNLOCK(lhw); /* * XXX If we clear this now and later a driver * thinks it * can do a hw_scan again, we will * currently not re-enable it? */ vap->iv_flags_ext &= ~IEEE80211_FEXT_SCAN_OFFLOAD; ieee80211_start_scan(vap, IEEE80211_SCAN_ACTIVE | IEEE80211_SCAN_NOPICK | IEEE80211_SCAN_ONCE, IEEE80211_SCAN_FOREVER, ss->ss_mindwell ? ss->ss_mindwell : msecs_to_ticks(20), ss->ss_maxdwell ? ss->ss_maxdwell : msecs_to_ticks(200), vap->iv_des_nssid, vap->iv_des_ssid); goto sw_scan; } ic_printf(ic, "ERROR: %s: hw_scan returned %d\n", __func__, error); } } } static void lkpi_ic_scan_end(struct ieee80211com *ic) { struct lkpi_hw *lhw; bool is_hw_scan; lhw = ic->ic_softc; LKPI_80211_LHW_SCAN_LOCK(lhw); if ((lhw->scan_flags & LKPI_LHW_SCAN_RUNNING) == 0) { LKPI_80211_LHW_SCAN_UNLOCK(lhw); return; } is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0; LKPI_80211_LHW_SCAN_UNLOCK(lhw); if (!is_hw_scan) { struct ieee80211_scan_state *ss; struct ieee80211vap *vap; struct ieee80211_hw *hw; struct lkpi_vif *lvif; struct ieee80211_vif *vif; ss = ic->ic_scan; vap = ss->ss_vap; hw = LHW_TO_HW(lhw); lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); lkpi_80211_mo_sw_scan_complete(hw, vif); /* Send PS to stop buffering if n80211 does not for us? 
*/ if (vap->iv_state == IEEE80211_S_SCAN) lkpi_hw_conf_idle(hw, true); } } static void lkpi_ic_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) { struct lkpi_hw *lhw; bool is_hw_scan; lhw = ss->ss_ic->ic_softc; LKPI_80211_LHW_SCAN_LOCK(lhw); is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0; LKPI_80211_LHW_SCAN_UNLOCK(lhw); if (!is_hw_scan) lhw->ic_scan_curchan(ss, maxdwell); } static void lkpi_ic_scan_mindwell(struct ieee80211_scan_state *ss) { struct lkpi_hw *lhw; bool is_hw_scan; lhw = ss->ss_ic->ic_softc; LKPI_80211_LHW_SCAN_LOCK(lhw); is_hw_scan = (lhw->scan_flags & LKPI_LHW_SCAN_HW) != 0; LKPI_80211_LHW_SCAN_UNLOCK(lhw); if (!is_hw_scan) lhw->ic_scan_mindwell(ss); } static void lkpi_ic_set_channel(struct ieee80211com *ic) { struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct ieee80211_channel *c; struct linuxkpi_ieee80211_channel *chan; int error; bool hw_scan_running; lhw = ic->ic_softc; /* If we do not support (*config)() save us the work. */ if (lhw->ops->config == NULL) return; /* If we have a hw_scan running do not switch channels. */ LKPI_80211_LHW_SCAN_LOCK(lhw); hw_scan_running = (lhw->scan_flags & (LKPI_LHW_SCAN_RUNNING|LKPI_LHW_SCAN_HW)) == (LKPI_LHW_SCAN_RUNNING|LKPI_LHW_SCAN_HW); LKPI_80211_LHW_SCAN_UNLOCK(lhw); if (hw_scan_running) return; c = ic->ic_curchan; if (c == NULL || c == IEEE80211_CHAN_ANYC) { ic_printf(ic, "%s: c %p ops->config %p\n", __func__, c, lhw->ops->config); return; } chan = lkpi_find_lkpi80211_chan(lhw, c); if (chan == NULL) { ic_printf(ic, "%s: c %p chan %p\n", __func__, c, chan); return; } /* XXX max power for scanning? */ IMPROVE(); hw = LHW_TO_HW(lhw); cfg80211_chandef_create(&hw->conf.chandef, chan, #ifdef LKPI_80211_HT (ic->ic_flags_ht & IEEE80211_FHT_HT) ? NL80211_CHAN_HT20 : #endif NL80211_CHAN_NO_HT); error = lkpi_80211_mo_config(hw, IEEE80211_CONF_CHANGE_CHANNEL); if (error != 0 && error != EOPNOTSUPP) { ic_printf(ic, "ERROR: %s: config %#0x returned %d\n", __func__, IEEE80211_CONF_CHANGE_CHANNEL, error); /* XXX should we unroll to the previous chandef? */ IMPROVE(); } else { /* Update radiotap channels as well. */ lhw->rtap_tx.wt_chan_freq = htole16(c->ic_freq); lhw->rtap_tx.wt_chan_flags = htole16(c->ic_flags); lhw->rtap_rx.wr_chan_freq = htole16(c->ic_freq); lhw->rtap_rx.wr_chan_flags = htole16(c->ic_flags); } /* Currently PS is hard coded off! Not sure it belongs here. */ IMPROVE(); if (ieee80211_hw_check(hw, SUPPORTS_PS) && (hw->conf.flags & IEEE80211_CONF_PS) != 0) { hw->conf.flags &= ~IEEE80211_CONF_PS; error = lkpi_80211_mo_config(hw, IEEE80211_CONF_CHANGE_PS); if (error != 0 && error != EOPNOTSUPP) ic_printf(ic, "ERROR: %s: config %#0x returned " "%d\n", __func__, IEEE80211_CONF_CHANGE_PS, error); } } static struct ieee80211_node * lkpi_ic_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211com *ic; struct lkpi_hw *lhw; struct ieee80211_node *ni; struct ieee80211_hw *hw; struct lkpi_sta *lsta; ic = vap->iv_ic; lhw = ic->ic_softc; /* We keep allocations de-coupled so we can deal with the two worlds. 
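 * net80211 allocates and owns the ni; we allocate the lsta shadow
 * next to it and unwind via the saved ic_node_free if the lsta
 * allocation fails.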
*/ if (lhw->ic_node_alloc == NULL) return (NULL); ni = lhw->ic_node_alloc(vap, mac); if (ni == NULL) return (NULL); hw = LHW_TO_HW(lhw); lsta = lkpi_lsta_alloc(vap, mac, hw, ni); if (lsta == NULL) { if (lhw->ic_node_free != NULL) lhw->ic_node_free(ni); return (NULL); } return (ni); } static int lkpi_ic_node_init(struct ieee80211_node *ni) { struct ieee80211com *ic; struct lkpi_hw *lhw; int error; ic = ni->ni_ic; lhw = ic->ic_softc; if (lhw->ic_node_init != NULL) { error = lhw->ic_node_init(ni); if (error != 0) return (error); } /* XXX-BZ Sync other state over. */ IMPROVE(); return (0); } static void lkpi_ic_node_cleanup(struct ieee80211_node *ni) { struct ieee80211com *ic; struct lkpi_hw *lhw; ic = ni->ni_ic; lhw = ic->ic_softc; /* XXX-BZ remove from driver, ... */ IMPROVE(); if (lhw->ic_node_cleanup != NULL) lhw->ic_node_cleanup(ni); } static void lkpi_ic_node_free(struct ieee80211_node *ni) { struct ieee80211com *ic; struct lkpi_hw *lhw; struct lkpi_sta *lsta; ic = ni->ni_ic; lhw = ic->ic_softc; lsta = ni->ni_drv_data; /* KASSERT lsta is not NULL here. Print ni/ni_refcnt. */ /* * Pass in the original ni so that in case of error we can check that * it is the same as lsta->ni. */ lkpi_lsta_free(lsta, ni); if (lhw->ic_node_free != NULL) lhw->ic_node_free(ni); } /* * lkpi_xmit() is called from both the (*ic_raw_xmit) and the (*ic_transmit) * call paths. * Unfortunately they have slightly different invariants. See * ieee80211_raw_output() and ieee80211_parent_xmitpkt(). * Both take care of the ni reference in case of error, and otherwise during * the callback after transmit. * The difference is that in case of error (*ic_raw_xmit) needs us to release * the mbuf, while (*ic_transmit) will free the mbuf itself. */ static int lkpi_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params __unused, bool freem) { struct lkpi_sta *lsta; int error; lsta = ni->ni_drv_data; LKPI_80211_LSTA_TXQ_LOCK(lsta); #if 0 if (!lsta->added_to_drv || !lsta->txq_ready) { #else /* * Back out this part of 886653492945f which breaks rtw88 or * in general drivers without (*sta_state)() but only the * legacy fallback to (*sta_add)(). */ if (!lsta->txq_ready) { #endif LKPI_80211_LSTA_TXQ_UNLOCK(lsta); if (freem) m_free(m); return (ENETDOWN); } /* Queue the packet and enqueue the task to handle it. */ error = mbufq_enqueue(&lsta->txq, m); if (error != 0) { LKPI_80211_LSTA_TXQ_UNLOCK(lsta); if (freem) m_free(m); #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_TX) ic_printf(ni->ni_ic, "%s: mbufq_enqueue failed: %d\n", __func__, error); #endif return (ENETDOWN); } taskqueue_enqueue(taskqueue_thread, &lsta->txq_task); LKPI_80211_LSTA_TXQ_UNLOCK(lsta); #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_TX) printf("%s:%d lsta %p ni %p %6D mbuf_qlen %d\n", __func__, __LINE__, lsta, ni, ni->ni_macaddr, ":", mbufq_len(&lsta->txq)); #endif return (0); } static int lkpi_ic_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params __unused) { return (lkpi_xmit(ni, m, NULL, true)); } #ifdef LKPI_80211_HW_CRYPTO /* * This is a bit of a hack given we know we are operating on a * single frame and we know that hardware will deal with it. * But otherwise the enmic bit and the encrypt bit need to be * decoupled. */ static int lkpi_hw_crypto_prepare_tkip(struct ieee80211_key *k, struct ieee80211_key_conf *kc, struct sk_buff *skb) { struct ieee80211_hdr *hdr; uint32_t hlen, hdrlen; uint8_t *p; /* * TKIP only happens on data.
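 * Management and control frames are never TKIP-protected, so anything
 * that is not a data frame can pass through untouched.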
*/ hdr = (void *)skb->data; if (!ieee80211_is_data_present(hdr->frame_control)) return (0); /* * "enmic" (though we do not do that). */ /* any conditions to not apply this? */ if (skb_tailroom(skb) < k->wk_cipher->ic_miclen) return (ENOBUFS); p = skb_put(skb, k->wk_cipher->ic_miclen); if ((kc->flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) != 0) goto encrypt; /* * (*enmic) which we hopefully do not have to do with hw accel. * That means if we make it here we have a problem. */ TODO("(*enmic)"); return (ENXIO); encrypt: /* * "encrypt" (though we do not do that). */ /* * Check if we have anything to do as requested by the driver * or if we are done? */ if ((kc->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) == 0 && (kc->flags & IEEE80211_KEY_FLAG_GENERATE_IV) == 0) return (0); hlen = k->wk_cipher->ic_header; if (skb_headroom(skb) < hlen) return (ENOBUFS); hdr = (void *)skb->data; hdrlen = ieee80211_hdrlen(hdr->frame_control); p = skb_push(skb, hlen); memmove(p, p + hlen, hdrlen); /* If the driver requested space only we are done. */ if ((kc->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) != 0) return (0); p += hdrlen; k->wk_cipher->ic_setiv(k, p); /* If we make it here we do sw encryption. */ TODO("sw encrypt"); return (ENXIO); } static int lkpi_hw_crypto_prepare_ccmp(struct ieee80211_key *k, struct ieee80211_key_conf *kc, struct sk_buff *skb) { struct ieee80211_hdr *hdr; uint32_t hlen, hdrlen; uint8_t *p; hdr = (void *)skb->data; /* * Check if we have anything to do as requested by the driver * or if we are done? */ if ((kc->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) == 0 && (kc->flags & IEEE80211_KEY_FLAG_GENERATE_IV) == 0 && /* MFP */ !((kc->flags & IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) != 0 && ieee80211_is_mgmt(hdr->frame_control))) return (0); hlen = k->wk_cipher->ic_header; if (skb_headroom(skb) < hlen) return (ENOBUFS); hdrlen = ieee80211_hdrlen(hdr->frame_control); p = skb_push(skb, hlen); memmove(p, p + hlen, hdrlen); /* If the driver requested space only we are done. */ if ((kc->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) != 0) return (0); p += hdrlen; k->wk_cipher->ic_setiv(k, p); return (0); } static int lkpi_hw_crypto_prepare(struct lkpi_sta *lsta, struct ieee80211_key *k, struct sk_buff *skb) { struct ieee80211_tx_info *info; struct ieee80211_key_conf *kc; KASSERT(lsta != NULL, ("%s: lsta is NULL", __func__)); KASSERT(k != NULL, ("%s: key is NULL", __func__)); KASSERT(skb != NULL, ("%s: skb is NULL", __func__)); kc = lsta->kc[k->wk_keyix]; info = IEEE80211_SKB_CB(skb); info->control.hw_key = kc; /* MUST NOT happen. KASSERT?
*/ if (kc == NULL) { ic_printf(lsta->ni->ni_ic, "%s: lsta %p k %p skb %p, " "kc is NULL on hw crypto offload\n", __func__, lsta, k, skb); return (ENXIO); } switch (kc->cipher) { case WLAN_CIPHER_SUITE_TKIP: return (lkpi_hw_crypto_prepare_tkip(k, kc, skb)); case WLAN_CIPHER_SUITE_CCMP: return (lkpi_hw_crypto_prepare_ccmp(k, kc, skb)); case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: default: ic_printf(lsta->ni->ni_ic, "%s: lsta %p k %p kc %p skb %p, " "unsupported cipher suite %u (%s)\n", __func__, lsta, k, kc, skb, kc->cipher, lkpi_cipher_suite_to_name(kc->cipher)); return (EOPNOTSUPP); } } static uint8_t lkpi_hw_crypto_tailroom(struct lkpi_sta *lsta, struct ieee80211_key *k) { struct ieee80211_key_conf *kc; kc = lsta->kc[k->wk_keyix]; if (kc == NULL) return (0); IMPROVE("which other flags need tailroom?"); if (kc->flags & (IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) return (32); /* Large enough to hold everything and pow2. */ return (0); } #endif static void lkpi_80211_txq_tx_one(struct lkpi_sta *lsta, struct mbuf *m) { struct ieee80211_node *ni; struct ieee80211_frame *wh; struct ieee80211_key *k; struct sk_buff *skb; struct ieee80211com *ic; struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct ieee80211_channel *c; struct ieee80211_tx_control control; struct ieee80211_tx_info *info; struct ieee80211_sta *sta; struct ieee80211_hdr *hdr; struct lkpi_txq *ltxq; void *buf; ieee80211_keyix keyix; uint8_t ac, tid, tailroom; M_ASSERTPKTHDR(m); #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_TX_DUMP) hexdump(mtod(m, const void *), m->m_len, "RAW TX (plain) ", 0); #endif ni = lsta->ni; k = NULL; keyix = IEEE80211_KEYIX_NONE; wh = mtod(m, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { #ifdef LKPI_80211_HW_CRYPTO if (lkpi_hwcrypto) { k = ieee80211_crypto_get_txkey(ni, m); if (k != NULL && lsta->kc[k->wk_keyix] != NULL) keyix = k->wk_keyix; } #endif /* Encrypt the frame if need be. */ if (keyix == IEEE80211_KEYIX_NONE) { /* Retrieve key for TX && do software encryption. */ k = ieee80211_crypto_encap(ni, m); if (k == NULL) { ieee80211_free_node(ni); m_freem(m); return; } } } ic = ni->ni_ic; lhw = ic->ic_softc; hw = LHW_TO_HW(lhw); c = ni->ni_chan; if (ieee80211_radiotap_active_vap(ni->ni_vap)) { struct lkpi_radiotap_tx_hdr *rtap; rtap = &lhw->rtap_tx; rtap->wt_flags = 0; if (k != NULL) rtap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (m->m_flags & M_FRAG) rtap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG; IMPROVE(); rtap->wt_rate = 0; if (c != NULL && c != IEEE80211_CHAN_ANYC) { rtap->wt_chan_freq = htole16(c->ic_freq); rtap->wt_chan_flags = htole16(c->ic_flags); } ieee80211_radiotap_tx(ni->ni_vap, m); } #ifdef LKPI_80211_HW_CRYPTO if (lkpi_hwcrypto && keyix != IEEE80211_KEYIX_NONE) tailroom = lkpi_hw_crypto_tailroom(lsta, k); else #endif tailroom = 0; /* * net80211 should handle hw->extra_tx_headroom. * Though for as long as we are copying we don't mind. 
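 * (extra_tx_headroom is the slack a driver requests at the front of
 * each skb for its own headers; we honor it via skb_reserve() below.)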
* XXX-BZ rtw88 asks for too much headroom for ipv6+tcp: * https://lists.freebsd.org/archives/freebsd-transport/2022-February/000012.html */ skb = dev_alloc_skb(hw->extra_tx_headroom + tailroom + m->m_pkthdr.len); if (skb == NULL) { static uint8_t skb_alloc_failures = 0; if (skb_alloc_failures++ == 0) { int tid; sta = LSTA_TO_STA(lsta); ic_printf(ic, "ERROR %s: skb alloc failed %d + %d, lsta %p sta %p ni %p\n", __func__, hw->extra_tx_headroom, m->m_pkthdr.len, lsta, sta, ni); for (tid = 0; tid < nitems(sta->txq); tid++) { if (sta->txq[tid] == NULL) continue; ltxq = TXQ_TO_LTXQ(sta->txq[tid]); ic_printf(ic, " tid %d ltxq %p seen_dequeue %d stopped %d skb_queue_len %u\n", tid, ltxq, ltxq->seen_dequeue, ltxq->stopped, skb_queue_len(<xq->skbq)); } } ieee80211_free_node(ni); m_freem(m); return; } skb_reserve(skb, hw->extra_tx_headroom); /* XXX-BZ we need a SKB version understanding mbuf. */ /* Save the mbuf for ieee80211_tx_complete(). */ skb->m_free_func = lkpi_ieee80211_free_skb_mbuf; skb->m = m; #if 0 skb_put_data(skb, m->m_data, m->m_pkthdr.len); #else buf = skb_put(skb, m->m_pkthdr.len); m_copydata(m, 0, m->m_pkthdr.len, buf); #endif /* Save the ni. */ m->m_pkthdr.PH_loc.ptr = ni; lvif = VAP_TO_LVIF(ni->ni_vap); vif = LVIF_TO_VIF(lvif); hdr = (void *)skb->data; tid = linuxkpi_ieee80211_get_tid(hdr, true); if (tid == IEEE80211_NONQOS_TID) { /* == IEEE80211_NUM_TIDS */ if (!ieee80211_is_data(hdr->frame_control)) { /* MGMT and CTRL frames go on TID 7/VO. */ skb->priority = 7; ac = IEEE80211_AC_VO; } else { /* Other non-QOS traffic goes to BE. */ /* Contrary to net80211 we MUST NOT promote M_EAPOL. */ skb->priority = 0; ac = IEEE80211_AC_BE; } } else { skb->priority = tid & IEEE80211_QOS_CTL_TID_MASK; ac = ieee80211e_up_to_ac[tid & 7]; } skb_set_queue_mapping(skb, ac); info = IEEE80211_SKB_CB(skb); info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; /* Slight delay; probably only happens on scanning so fine? */ if (c == NULL || c == IEEE80211_CHAN_ANYC) c = ic->ic_curchan; info->band = lkpi_net80211_chan_to_nl80211_band(c); info->hw_queue = vif->hw_queue[ac]; if (m->m_flags & M_EAPOL) info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; info->control.vif = vif; /* XXX-BZ info->control.rates */ #ifdef __notyet__ #ifdef LKPI_80211_HT info->control.rts_cts_rate_idx= info->control.use_rts= /* RTS */ info->control.use_cts_prot= /* RTS/CTS*/ #endif #endif sta = LSTA_TO_STA(lsta); #ifdef LKPI_80211_HW_CRYPTO if (lkpi_hwcrypto && keyix != IEEE80211_KEYIX_NONE) { int error; error = lkpi_hw_crypto_prepare(lsta, k, skb); if (error != 0) { /* * We only have to free the skb which will free the * mbuf and release the reference on the ni. 
*/ dev_kfree_skb(skb); return; } } #endif IMPROVE(); ltxq = NULL; if (!ieee80211_is_data_present(hdr->frame_control)) { if (vif->type == NL80211_IFTYPE_STATION && lsta->added_to_drv && sta->txq[IEEE80211_NUM_TIDS] != NULL) ltxq = TXQ_TO_LTXQ(sta->txq[IEEE80211_NUM_TIDS]); } else if (lsta->added_to_drv && sta->txq[skb->priority] != NULL) { ltxq = TXQ_TO_LTXQ(sta->txq[skb->priority]); } if (ltxq == NULL) goto ops_tx; KASSERT(ltxq != NULL, ("%s: lsta %p sta %p m %p skb %p " "ltxq %p != NULL\n", __func__, lsta, sta, m, skb, ltxq)); LKPI_80211_LTXQ_LOCK(ltxq); skb_queue_tail(&ltxq->skbq, skb); #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_TX) - printf("%s:%d mo_wake_tx_queue :: %d %u lsta %p sta %p " + printf("%s:%d mo_wake_tx_queue :: %d %lu lsta %p sta %p " "ni %p %6D skb %p ltxq %p { qlen %u, ac %d tid %u } " "WAKE_TX_Q ac %d prio %u qmap %u\n", __func__, __LINE__, - curthread->td_tid, (unsigned int)ticks, + curthread->td_tid, jiffies, lsta, sta, ni, ni->ni_macaddr, ":", skb, ltxq, skb_queue_len(&ltxq->skbq), ltxq->txq.ac, ltxq->txq.tid, ac, skb->priority, skb->qmap); #endif LKPI_80211_LTXQ_UNLOCK(ltxq); wiphy_lock(hw->wiphy); lkpi_80211_mo_wake_tx_queue(hw, &ltxq->txq); wiphy_unlock(hw->wiphy); return; ops_tx: #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_TX) printf("%s:%d mo_tx :: lsta %p sta %p ni %p %6D skb %p " "TX ac %d prio %u qmap %u\n", __func__, __LINE__, lsta, sta, ni, ni->ni_macaddr, ":", skb, ac, skb->priority, skb->qmap); #endif memset(&control, 0, sizeof(control)); control.sta = sta; wiphy_lock(hw->wiphy); lkpi_80211_mo_tx(hw, &control, skb); wiphy_unlock(hw->wiphy); } static void lkpi_80211_txq_task(void *ctx, int pending) { struct lkpi_sta *lsta; struct mbufq mq; struct mbuf *m; bool shall_tx; lsta = ctx; #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_TX) printf("%s:%d lsta %p ni %p %6D pending %d mbuf_qlen %d\n", __func__, __LINE__, lsta, lsta->ni, lsta->ni->ni_macaddr, ":", pending, mbufq_len(&lsta->txq)); #endif mbufq_init(&mq, IFQ_MAXLEN); LKPI_80211_LSTA_TXQ_LOCK(lsta); /* * Do not re-check lsta->txq_ready here; we may have a pending * disassoc/deauth frame still. On the contrary if txq_ready is * false we do not have a valid sta anymore in the firmware so no * point to try to TX. * We also use txq_ready as a semaphore and will drain the txq manually * if needed on our way towards SCAN/INIT in the state machine. */ #if 0 shall_tx = lsta->added_to_drv && lsta->txq_ready; #else /* * Backout this part of 886653492945f which breaks rtw88 or * in general drivers without (*sta_state)() but only the * legacy fallback to (*sta_add)(). */ shall_tx = lsta->txq_ready; #endif if (__predict_true(shall_tx)) mbufq_concat(&mq, &lsta->txq); /* * else a state change will push the packets out manually or * lkpi_lsta_free() will drain the lsta->txq and free the mbufs. */ LKPI_80211_LSTA_TXQ_UNLOCK(lsta); m = mbufq_dequeue(&mq); while (m != NULL) { lkpi_80211_txq_tx_one(lsta, m); m = mbufq_dequeue(&mq); } } static int lkpi_ic_transmit(struct ieee80211com *ic, struct mbuf *m) { /* XXX TODO */ IMPROVE(); /* Quick and dirty cheating hack. 
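 * net80211 stashes the node reference in m->m_pkthdr.rcvif before
 * handing the mbuf to (*ic_transmit)(), so
 *	ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
 * recovers it below without an extra node lookup.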
*/ struct ieee80211_node *ni; ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; return (lkpi_xmit(ni, m, NULL, false)); } #ifdef LKPI_80211_HT static int lkpi_ic_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct ieee80211com *ic; struct lkpi_hw *lhw; ic = ni->ni_ic; lhw = ic->ic_softc; IMPROVE_HT("recv_action called; nothing to do in lkpi; make debugging"); return (lhw->ic_recv_action(ni, wh, frm, efrm)); } static int lkpi_ic_send_action(struct ieee80211_node *ni, int category, int action, void *sa) { struct ieee80211com *ic; struct lkpi_hw *lhw; ic = ni->ni_ic; lhw = ic->ic_softc; IMPROVE_HT("send_action called; nothing to do in lkpi; make debugging"); return (lhw->ic_send_action(ni, category, action, sa)); } static int lkpi_ic_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct ieee80211com *ic; struct lkpi_hw *lhw; ic = ni->ni_ic; lhw = ic->ic_softc; IMPROVE_HT("ieee80211_ampdu_enable called; nothing to do in lkpi for now; make debugging"); return (lhw->ic_ampdu_enable(ni, tap)); } /* * (*ic_addba_request)() is called by ieee80211_ampdu_request() before * calling send_action(CAT_BA, BA_ADDBA_REQUEST). * * NB: returns 0 on ERROR! */ static int lkpi_ic_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int dialogtoken, int baparamset, int batimeout) { struct ieee80211com *ic; struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct ieee80211vap *vap; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct lkpi_sta *lsta; struct ieee80211_sta *sta; struct ieee80211_ampdu_params params = { }; int error; ic = ni->ni_ic; lhw = ic->ic_softc; hw = LHW_TO_HW(lhw); vap = ni->ni_vap; lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); lsta = ni->ni_drv_data; sta = LSTA_TO_STA(lsta); if (!lsta->added_to_drv) { ic_printf(ic, "%s: lsta %p ni %p, sta %p not added to firmware\n", __func__, lsta, ni, sta); return (0); } params.sta = sta; params.action = IEEE80211_AMPDU_TX_START; /* Keep 0 here! */ params.buf_size = 0; params.timeout = 0; params.ssn = tap->txa_start & (IEEE80211_SEQ_RANGE-1); params.tid = tap->txa_tid; params.amsdu = false; IEEE80211_UNLOCK(ic); wiphy_lock(hw->wiphy); error = lkpi_80211_mo_ampdu_action(hw, vif, &params); wiphy_unlock(hw->wiphy); IEEE80211_LOCK(ic); if (error != 0) { ic_printf(ic, "%s: mo_ampdu_action returned %d. ni %p tap %p\n", __func__, error, ni, tap); return (0); } return (lhw->ic_addba_request(ni, tap, dialogtoken, baparamset, batimeout)); } /* * (*ic_addba_response)() is called from ht_recv_action_ba_addba_response() * and calls the default ieee80211_addba_response() which always returns 1. * * NB: No error checking in net80211! * Staying with 0 is an error. 
*/ static int lkpi_ic_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int status, int baparamset, int batimeout) { struct ieee80211com *ic; struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct ieee80211vap *vap; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct lkpi_sta *lsta; struct ieee80211_sta *sta; struct ieee80211_ampdu_params params = { }; int error; ic = ni->ni_ic; lhw = ic->ic_softc; hw = LHW_TO_HW(lhw); vap = ni->ni_vap; lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); lsta = ni->ni_drv_data; sta = LSTA_TO_STA(lsta); if (!lsta->added_to_drv) { ic_printf(ic, "%s: lsta %p ni %p, sta %p not added to firmware\n", __func__, lsta, ni, sta); return (0); } if (status == IEEE80211_STATUS_SUCCESS) { params.sta = sta; params.action = IEEE80211_AMPDU_TX_OPERATIONAL; params.buf_size = tap->txa_wnd; params.timeout = 0; params.ssn = 0; params.tid = tap->txa_tid; if ((tap->txa_flags & IEEE80211_AGGR_AMSDU) != 0) params.amsdu = true; else params.amsdu = false; } else { /* We need to free the allocated resources. */ params.sta = sta; switch (status) { /* params.action = FLUSH, FLUSH_CONT */ default: params.action = IEEE80211_AMPDU_TX_STOP_CONT; break; } params.buf_size = 0; params.timeout = 0; params.ssn = 0; params.tid = tap->txa_tid; params.amsdu = false; } IEEE80211_UNLOCK(ic); wiphy_lock(hw->wiphy); error = lkpi_80211_mo_ampdu_action(hw, vif, &params); wiphy_unlock(hw->wiphy); IEEE80211_LOCK(ic); if (error != 0) { ic_printf(ic, "%s: mo_ampdu_action returned %d. ni %p tap %p\n", __func__, error, ni, tap); return (0); } IMPROVE_HT("who unleashes the TXQ? and when?, do we need to ni->ni_txseqs[tid] = tap->txa_start & 0xfff;"); return (lhw->ic_addba_response(ni, tap, status, baparamset, batimeout)); } /* * (*ic_addba_stop)() is called from ampdu_tx_stop(), ht_recv_action_ba_delba(), * and ieee80211_ampdu_stop() and calls the default ieee80211_addba_stop(). */ static void lkpi_ic_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct ieee80211com *ic; struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct ieee80211vap *vap; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct lkpi_sta *lsta; struct ieee80211_sta *sta; struct ieee80211_ampdu_params params = { }; int error; ic = ni->ni_ic; lhw = ic->ic_softc; hw = LHW_TO_HW(lhw); vap = ni->ni_vap; lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); lsta = ni->ni_drv_data; sta = LSTA_TO_STA(lsta); if (!lsta->added_to_drv) { ic_printf(ic, "%s: lsta %p ni %p, sta %p not added to firmware\n", __func__, lsta, ni, sta); goto n80211; } /* We need to free the allocated resources. */ params.sta = sta; IMPROVE("net80211 does not provide a reason to us"); params.action = IEEE80211_AMPDU_TX_STOP_CONT; /* params.action = FLUSH, FLUSH_CONT */ params.buf_size = 0; params.timeout = 0; params.ssn = 0; params.tid = tap->txa_tid; params.amsdu = false; IEEE80211_UNLOCK(ic); wiphy_lock(hw->wiphy); error = lkpi_80211_mo_ampdu_action(hw, vif, &params); wiphy_unlock(hw->wiphy); IEEE80211_LOCK(ic); if (error != 0) { ic_printf(ic, "%s: mo_ampdu_action returned %d. 
ni %p tap %p\n", __func__, error, ni, tap); goto n80211; } IMPROVE_HT("anything else?"); n80211: lhw->ic_addba_stop(ni, tap); } static void lkpi_ic_addba_response_timeout(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct ieee80211com *ic; struct lkpi_hw *lhw; ic = ni->ni_ic; lhw = ic->ic_softc; IMPROVE_HT(); lhw->ic_addba_response_timeout(ni, tap); } static void lkpi_ic_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int status) { struct ieee80211com *ic; struct lkpi_hw *lhw; ic = ni->ni_ic; lhw = ic->ic_softc; IMPROVE_HT(); lhw->ic_bar_response(ni, tap, status); } static int lkpi_ic_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap, int baparamset, int batimeout, int baseqctl) { struct ieee80211com *ic; struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct ieee80211vap *vap; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct lkpi_sta *lsta; struct ieee80211_sta *sta; struct ieee80211_ampdu_params params = { }; int error; ic = ni->ni_ic; lhw = ic->ic_softc; hw = LHW_TO_HW(lhw); vap = ni->ni_vap; lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); lsta = ni->ni_drv_data; sta = LSTA_TO_STA(lsta); IEEE80211_UNLOCK_ASSERT(ic); if (!lsta->added_to_drv) { ic_printf(ic, "%s: lsta %p ni %p vap %p, sta %p not added to firmware\n", __func__, lsta, ni, vap, sta); return (-ENXIO); } params.sta = sta; params.action = IEEE80211_AMPDU_RX_START; params.buf_size = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ); if (params.buf_size == 0) params.buf_size = IEEE80211_MAX_AMPDU_BUF_HT; else params.buf_size = min(params.buf_size, IEEE80211_MAX_AMPDU_BUF_HT); if (hw->max_rx_aggregation_subframes > 0 && params.buf_size > hw->max_rx_aggregation_subframes) params.buf_size = hw->max_rx_aggregation_subframes; params.timeout = le16toh(batimeout); params.ssn = _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START); params.tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID); /* Based on net80211::ampdu_rx_start(). */ if ((vap->iv_htcaps & IEEE80211_HTC_RX_AMSDU_AMPDU) && (_IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_AMSDU))) params.amsdu = true; else params.amsdu = false; wiphy_lock(hw->wiphy); error = lkpi_80211_mo_ampdu_action(hw, vif, &params); wiphy_unlock(hw->wiphy); if (error != 0) { ic_printf(ic, "%s: mo_ampdu_action returned %d. ni %p rap %p\n", __func__, error, ni, rap); return (error); } if (!ieee80211_hw_check(hw, SUPPORTS_REORDERING_BUFFER)) { IMPROVE("%s: TODO: SUPPORTS_REORDERING_BUFFER not set; check net80211\n", __func__); } IMPROVE_HT("net80211 is missing the error check on return and assumes success"); error = lhw->ic_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl); return (error); } static void lkpi_ic_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) { struct ieee80211com *ic; struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct ieee80211vap *vap; struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct lkpi_sta *lsta; struct ieee80211_sta *sta; struct ieee80211_ampdu_params params = { }; int error; uint8_t tid; bool ic_locked; ic = ni->ni_ic; lhw = ic->ic_softc; /* * We should not (cannot) call into mac80211 ops with AMPDU_RX_STOP if * we did not START. Some drivers pass it down to firmware which will * simply barf and net80211 calls ieee80211_ht_node_cleanup() from * ieee80211_ht_node_init() amongst others which will iterate over all * tid and call ic_ampdu_rx_stop() unconditionally. 
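 * Hence the guard below only forwards the stop to the driver when
 * net80211 itself still has RX aggregation marked as running:
 *	if ((rap->rxa_flags & IEEE80211_AGGR_RUNNING) == 0)
 *		goto net80211_only;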
* XXX net80211 should probably be more "gentle" in these cases and * track some state itself. */ if ((rap->rxa_flags & IEEE80211_AGGR_RUNNING) == 0) goto net80211_only; hw = LHW_TO_HW(lhw); vap = ni->ni_vap; lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); lsta = ni->ni_drv_data; sta = LSTA_TO_STA(lsta); IMPROVE_HT("This really should be passed from ht_recv_action_ba_delba."); for (tid = 0; tid < WME_NUM_TID; tid++) { if (&ni->ni_rx_ampdu[tid] == rap) break; } params.sta = sta; params.action = IEEE80211_AMPDU_RX_STOP; params.buf_size = 0; params.timeout = 0; params.ssn = 0; params.tid = tid; params.amsdu = false; ic_locked = IEEE80211_IS_LOCKED(ic); if (ic_locked) IEEE80211_UNLOCK(ic); wiphy_lock(hw->wiphy); error = lkpi_80211_mo_ampdu_action(hw, vif, &params); wiphy_unlock(hw->wiphy); if (ic_locked) IEEE80211_LOCK(ic); if (error != 0) ic_printf(ic, "%s: mo_ampdu_action returned %d. ni %p rap %p\n", __func__, error, ni, rap); net80211_only: lhw->ic_ampdu_rx_stop(ni, rap); } #endif static void lkpi_ic_getradiocaps_ht(struct ieee80211com *ic, struct ieee80211_hw *hw, uint8_t *bands, int *chan_flags, enum nl80211_band band) { #ifdef LKPI_80211_HT struct ieee80211_sta_ht_cap *ht_cap; ht_cap = &hw->wiphy->bands[band]->ht_cap; if (!ht_cap->ht_supported) return; switch (band) { case NL80211_BAND_2GHZ: setbit(bands, IEEE80211_MODE_11NG); break; case NL80211_BAND_5GHZ: setbit(bands, IEEE80211_MODE_11NA); break; default: IMPROVE("Unsupported band %d", band); return; } ic->ic_htcaps = IEEE80211_HTC_HT; /* HT operation */ /* * Rather than manually checking each flag and * translating IEEE80211_HT_CAP_ to IEEE80211_HTCAP_, * simply copy the 16bits. */ ic->ic_htcaps |= ht_cap->cap; /* Then deal with the other flags. */ if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) ic->ic_htcaps |= IEEE80211_HTC_AMPDU; #ifdef __notyet__ if (ieee80211_hw_check(hw, TX_AMSDU)) ic->ic_htcaps |= IEEE80211_HTC_AMSDU; if (ieee80211_hw_check(hw, SUPPORTS_AMSDU_IN_AMPDU)) ic->ic_htcaps |= (IEEE80211_HTC_RX_AMSDU_AMPDU | IEEE80211_HTC_TX_AMSDU_AMPDU); #endif IMPROVE("PS, ampdu_*, ht_cap.mcs.tx_params, ..."); ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_OFF; /* Only add HT40 channels if supported. */ if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) != 0 && chan_flags != NULL) *chan_flags |= NET80211_CBW_FLAG_HT40; #endif } static void lkpi_ic_getradiocaps(struct ieee80211com *ic, int maxchan, int *n, struct ieee80211_channel *c) { struct lkpi_hw *lhw; struct ieee80211_hw *hw; struct linuxkpi_ieee80211_channel *channels; uint8_t bands[IEEE80211_MODE_BYTES]; int chan_flags, error, i, nchans; /* Channels */ lhw = ic->ic_softc; hw = LHW_TO_HW(lhw); /* NL80211_BAND_2GHZ */ nchans = 0; if (hw->wiphy->bands[NL80211_BAND_2GHZ] != NULL) nchans = hw->wiphy->bands[NL80211_BAND_2GHZ]->n_channels; if (nchans > 0) { memset(bands, 0, sizeof(bands)); chan_flags = 0; setbit(bands, IEEE80211_MODE_11B); /* XXX-BZ unclear how to check for 11g. 
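 * An untested sketch of one possible heuristic: walk the band's
 * bitrate table and look for an ERP/OFDM rate, e.g.
 *	supband = hw->wiphy->bands[NL80211_BAND_2GHZ];
 *	for (i = 0; i < supband->n_bitrates; i++)
 *		if (supband->bitrates[i].bitrate > 110)
 *			break;
 * (bitrates are in 100kbit/s units; anything above 11Mbit/s on
 * 2GHz implies 11g capability).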
*/ IMPROVE("the bitrates may have flags?"); setbit(bands, IEEE80211_MODE_11G); lkpi_ic_getradiocaps_ht(ic, hw, bands, &chan_flags, NL80211_BAND_2GHZ); channels = hw->wiphy->bands[NL80211_BAND_2GHZ]->channels; for (i = 0; i < nchans && *n < maxchan; i++) { uint32_t nflags = 0; int cflags = chan_flags; if (channels[i].flags & IEEE80211_CHAN_DISABLED) { ic_printf(ic, "%s: Skipping disabled chan " "[%u/%u/%#x]\n", __func__, channels[i].hw_value, channels[i].center_freq, channels[i].flags); continue; } if (channels[i].flags & IEEE80211_CHAN_NO_IR) nflags |= (IEEE80211_CHAN_NOADHOC|IEEE80211_CHAN_PASSIVE); if (channels[i].flags & IEEE80211_CHAN_RADAR) nflags |= IEEE80211_CHAN_DFS; if (channels[i].flags & IEEE80211_CHAN_NO_160MHZ) cflags &= ~(NET80211_CBW_FLAG_VHT160|NET80211_CBW_FLAG_VHT80P80); if (channels[i].flags & IEEE80211_CHAN_NO_80MHZ) cflags &= ~NET80211_CBW_FLAG_VHT80; /* XXX how to map the remaining enum ieee80211_channel_flags? */ if (channels[i].flags & IEEE80211_CHAN_NO_HT40) cflags &= ~NET80211_CBW_FLAG_HT40; error = ieee80211_add_channel_cbw(c, maxchan, n, channels[i].hw_value, channels[i].center_freq, channels[i].max_power, nflags, bands, cflags); /* net80211::ENOBUFS: *n >= maxchans */ if (error != 0 && error != ENOBUFS) ic_printf(ic, "%s: Adding chan %u/%u/%#x/%#x/%#x/%#x " "returned error %d\n", __func__, channels[i].hw_value, channels[i].center_freq, channels[i].flags, nflags, chan_flags, cflags, error); if (error != 0) break; } } /* NL80211_BAND_5GHZ */ nchans = 0; if (hw->wiphy->bands[NL80211_BAND_5GHZ] != NULL) nchans = hw->wiphy->bands[NL80211_BAND_5GHZ]->n_channels; if (nchans > 0) { memset(bands, 0, sizeof(bands)); chan_flags = 0; setbit(bands, IEEE80211_MODE_11A); lkpi_ic_getradiocaps_ht(ic, hw, bands, &chan_flags, NL80211_BAND_5GHZ); #ifdef LKPI_80211_VHT if (hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.vht_supported) { ic->ic_flags_ext |= IEEE80211_FEXT_VHT; ic->ic_vht_cap.vht_cap_info = hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap; ic->ic_vht_cap.supp_mcs = hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.vht_mcs; setbit(bands, IEEE80211_MODE_VHT_5GHZ); chan_flags |= NET80211_CBW_FLAG_VHT80; if (IEEE80211_VHTCAP_SUPP_CHAN_WIDTH_IS_160MHZ( ic->ic_vht_cap.vht_cap_info)) chan_flags |= NET80211_CBW_FLAG_VHT160; if (IEEE80211_VHTCAP_SUPP_CHAN_WIDTH_IS_160_80P80MHZ( ic->ic_vht_cap.vht_cap_info)) chan_flags |= NET80211_CBW_FLAG_VHT80P80; } #endif channels = hw->wiphy->bands[NL80211_BAND_5GHZ]->channels; for (i = 0; i < nchans && *n < maxchan; i++) { uint32_t nflags = 0; int cflags = chan_flags; if (channels[i].flags & IEEE80211_CHAN_DISABLED) { ic_printf(ic, "%s: Skipping disabled chan " "[%u/%u/%#x]\n", __func__, channels[i].hw_value, channels[i].center_freq, channels[i].flags); continue; } if (channels[i].flags & IEEE80211_CHAN_NO_IR) nflags |= (IEEE80211_CHAN_NOADHOC|IEEE80211_CHAN_PASSIVE); if (channels[i].flags & IEEE80211_CHAN_RADAR) nflags |= IEEE80211_CHAN_DFS; if (channels[i].flags & IEEE80211_CHAN_NO_160MHZ) cflags &= ~(NET80211_CBW_FLAG_VHT160|NET80211_CBW_FLAG_VHT80P80); if (channels[i].flags & IEEE80211_CHAN_NO_80MHZ) cflags &= ~NET80211_CBW_FLAG_VHT80; /* XXX how to map the remaining enum ieee80211_channel_flags? 
*/ if (channels[i].flags & IEEE80211_CHAN_NO_HT40) cflags &= ~NET80211_CBW_FLAG_HT40; error = ieee80211_add_channel_cbw(c, maxchan, n, channels[i].hw_value, channels[i].center_freq, channels[i].max_power, nflags, bands, cflags); /* net80211::ENOBUFS: *n >= maxchans */ if (error != 0 && error != ENOBUFS) ic_printf(ic, "%s: Adding chan %u/%u/%#x/%#x/%#x/%#x " "returned error %d\n", __func__, channels[i].hw_value, channels[i].center_freq, channels[i].flags, nflags, chan_flags, cflags, error); if (error != 0) break; } } } static void * lkpi_ieee80211_ifalloc(void) { struct ieee80211com *ic; ic = malloc(sizeof(*ic), M_LKPI80211, M_WAITOK | M_ZERO); /* Setting these happens later when we have device information. */ ic->ic_softc = NULL; ic->ic_name = "linuxkpi"; return (ic); } struct ieee80211_hw * linuxkpi_ieee80211_alloc_hw(size_t priv_len, const struct ieee80211_ops *ops) { struct ieee80211_hw *hw; struct lkpi_hw *lhw; struct wiphy *wiphy; int ac; /* Get us and the driver data also allocated. */ wiphy = wiphy_new(&linuxkpi_mac80211cfgops, sizeof(*lhw) + priv_len); if (wiphy == NULL) return (NULL); lhw = wiphy_priv(wiphy); lhw->ops = ops; LKPI_80211_LHW_SCAN_LOCK_INIT(lhw); LKPI_80211_LHW_TXQ_LOCK_INIT(lhw); sx_init_flags(&lhw->lvif_sx, "lhw-lvif", SX_RECURSE | SX_DUPOK); TAILQ_INIT(&lhw->lvif_head); for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { lhw->txq_generation[ac] = 1; TAILQ_INIT(&lhw->scheduled_txqs[ac]); } /* Chanctx_conf */ INIT_LIST_HEAD(&lhw->lchanctx_list); /* Deferred RX path. */ LKPI_80211_LHW_RXQ_LOCK_INIT(lhw); TASK_INIT(&lhw->rxq_task, 0, lkpi_80211_lhw_rxq_task, lhw); mbufq_init(&lhw->rxq, 32 * NAPI_POLL_WEIGHT); lhw->rxq_stopped = false; /* * XXX-BZ TODO make sure there is a "_null" function to all ops * not initialized. */ hw = LHW_TO_HW(lhw); hw->wiphy = wiphy; hw->conf.flags |= IEEE80211_CONF_IDLE; hw->priv = (void *)(lhw + 1); /* BSD Specific. */ lhw->ic = lkpi_ieee80211_ifalloc(); IMPROVE(); return (hw); } void linuxkpi_ieee80211_iffree(struct ieee80211_hw *hw) { struct lkpi_hw *lhw; struct mbuf *m; lhw = HW_TO_LHW(hw); free(lhw->ic, M_LKPI80211); lhw->ic = NULL; /* * Drain the deferred RX path. */ LKPI_80211_LHW_RXQ_LOCK(lhw); lhw->rxq_stopped = true; LKPI_80211_LHW_RXQ_UNLOCK(lhw); /* Drain taskq, won't be restarted due to rxq_stopped being set. */ while (taskqueue_cancel(taskqueue_thread, &lhw->rxq_task, NULL) != 0) taskqueue_drain(taskqueue_thread, &lhw->rxq_task); /* Flush mbufq (make sure to release ni refs!). */ m = mbufq_dequeue(&lhw->rxq); while (m != NULL) { #ifdef LKPI_80211_USE_MTAG struct m_tag *mtag; mtag = m_tag_locate(m, MTAG_ABI_LKPI80211, LKPI80211_TAG_RXNI, NULL); if (mtag != NULL) { struct lkpi_80211_tag_rxni *rxni; rxni = (struct lkpi_80211_tag_rxni *)(mtag + 1); ieee80211_free_node(rxni->ni); } #else if (m->m_pkthdr.PH_loc.ptr != NULL) { struct ieee80211_node *ni; ni = m->m_pkthdr.PH_loc.ptr; ieee80211_free_node(ni); } #endif m_freem(m); m = mbufq_dequeue(&lhw->rxq); } KASSERT(mbufq_empty(&lhw->rxq), ("%s: lhw %p has rxq len %d != 0\n", __func__, lhw, mbufq_len(&lhw->rxq))); LKPI_80211_LHW_RXQ_LOCK_DESTROY(lhw); /* Chanctx_conf. */ if (!list_empty_careful(&lhw->lchanctx_list)) { struct lkpi_chanctx *lchanctx, *next; struct ieee80211_chanctx_conf *chanctx_conf; list_for_each_entry_safe(lchanctx, next, &lhw->lchanctx_list, entry) { if (lchanctx->added_to_drv) { /* In reality we should panic? 
*/ chanctx_conf = &lchanctx->chanctx_conf; lkpi_80211_mo_remove_chanctx(hw, chanctx_conf); } list_del(&lchanctx->entry); free(lchanctx, M_LKPI80211); } } /* Cleanup more of lhw here or in wiphy_free()? */ LKPI_80211_LHW_TXQ_LOCK_DESTROY(lhw); LKPI_80211_LHW_SCAN_LOCK_DESTROY(lhw); sx_destroy(&lhw->lvif_sx); IMPROVE(); } void linuxkpi_set_ieee80211_dev(struct ieee80211_hw *hw, char *name) { struct lkpi_hw *lhw; struct ieee80211com *ic; lhw = HW_TO_LHW(hw); ic = lhw->ic; /* Now set a proper name before ieee80211_ifattach(). */ ic->ic_softc = lhw; ic->ic_name = name; /* XXX-BZ do we also need to set wiphy name? */ } struct ieee80211_hw * linuxkpi_wiphy_to_ieee80211_hw(struct wiphy *wiphy) { struct lkpi_hw *lhw; lhw = wiphy_priv(wiphy); return (LHW_TO_HW(lhw)); } static void lkpi_radiotap_attach(struct lkpi_hw *lhw) { struct ieee80211com *ic; ic = lhw->ic; ieee80211_radiotap_attach(ic, &lhw->rtap_tx.wt_ihdr, sizeof(lhw->rtap_tx), LKPI_RTAP_TX_FLAGS_PRESENT, &lhw->rtap_rx.wr_ihdr, sizeof(lhw->rtap_rx), LKPI_RTAP_RX_FLAGS_PRESENT); } int linuxkpi_ieee80211_ifattach(struct ieee80211_hw *hw) { struct ieee80211com *ic; struct lkpi_hw *lhw; int band, i; lhw = HW_TO_LHW(hw); ic = lhw->ic; /* We do it this late as wiphy->dev should be set for the name. */ lhw->workq = alloc_ordered_workqueue(wiphy_name(hw->wiphy), 0); if (lhw->workq == NULL) return (-EAGAIN); /* XXX-BZ figure out how they count this... */ if (!is_zero_ether_addr(hw->wiphy->perm_addr)) { IEEE80211_ADDR_COPY(ic->ic_macaddr, hw->wiphy->perm_addr); } else if (hw->wiphy->n_addresses > 0) { /* We take the first one. */ IEEE80211_ADDR_COPY(ic->ic_macaddr, hw->wiphy->addresses[0].addr); } else { ic_printf(ic, "%s: warning, no hardware address!\n", __func__); } #ifdef __not_yet__ /* See comment in lkpi_80211_txq_tx_one(). */ ic->ic_headroom = hw->extra_tx_headroom; #endif ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* Set device capabilities. */ /* XXX-BZ we need to get these from linux80211/drivers and convert. */ ic->ic_caps = IEEE80211_C_STA | IEEE80211_C_MONITOR | IEEE80211_C_WPA | /* WPA/RSN */ #ifdef LKPI_80211_WME IEEE80211_C_WME | #endif #if 0 IEEE80211_C_PMGT | #endif IEEE80211_C_SHSLOT | /* short slot time supported */ IEEE80211_C_SHPREAMBLE /* short preamble supported */ ; #if 0 /* Scanning is a different kind of beast to re-work. */ ic->ic_caps |= IEEE80211_C_BGSCAN; #endif if (lhw->ops->hw_scan) { /* * Advertise full-offload scanning. * * Not limiting to SINGLE_SCAN_ON_ALL_BANDS here as otherwise * we essentially disable hw_scan for all drivers not setting * the flag. */ ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD; lhw->scan_flags |= LKPI_LHW_SCAN_HW; } /* Does HW support Fragmentation offload? */ if (ieee80211_hw_check(hw, SUPPORTS_TX_FRAG)) ic->ic_flags_ext |= IEEE80211_FEXT_FRAG_OFFLOAD; /* * The wiphy variables report bitmasks of avail antennas. * (*get_antenna) gets the currently set bitmasks, which can be * altered by (*set_antenna) for some drivers. * XXX-BZ will the count alone do us much good long-term in net80211? 
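 * Note the conversion below is lossy: chain masks 0x5 (antennas 0+2)
 * and 0x3 (antennas 0+1) both collapse to
 *	ic->ic_rxstream = bitcount32(rxs);
 * with a stream count of 2, so which chains are populated is no
 * longer visible to net80211.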
*/ if (hw->wiphy->available_antennas_rx || hw->wiphy->available_antennas_tx) { uint32_t rxs, txs; if (lkpi_80211_mo_get_antenna(hw, &txs, &rxs) == 0) { ic->ic_rxstream = bitcount32(rxs); ic->ic_txstream = bitcount32(txs); } } ic->ic_cryptocaps = 0; #ifdef LKPI_80211_HW_CRYPTO if (lkpi_hwcrypto && hw->wiphy->n_cipher_suites > 0) { uint32_t hwciphers; hwciphers = 0; for (i = 0; i < hw->wiphy->n_cipher_suites; i++) { uint32_t cs; cs = lkpi_l80211_to_net80211_cyphers( ic, hw->wiphy->cipher_suites[i]); if (cs == IEEE80211_CRYPTO_TKIP) { /* * We do set this here. We will only find out * when doing a SET_KEY operation depending on * what the driver returns. * net80211::ieee80211_crypto_newkey() * checks this so we will have to do flags * surgery later. */ cs |= IEEE80211_CRYPTO_TKIPMIC; } hwciphers |= cs; } /* * (20250415) nothing anywhere in the path checks we actually * support all these in net80211. * net80211 supports _256 variants but the ioctl does not. */ IMPROVE("as net80211 grows more support, enable them"); hwciphers &= (IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_TKIP | IEEE80211_CRYPTO_TKIPMIC | IEEE80211_CRYPTO_AES_CCM | IEEE80211_CRYPTO_AES_GCM_128); /* * We only support CCMP here, so further filter. * Also permit TKIP if turned on. */ hwciphers &= (IEEE80211_CRYPTO_AES_CCM | (lkpi_hwcrypto_tkip ? (IEEE80211_CRYPTO_TKIP | IEEE80211_CRYPTO_TKIPMIC) : 0)); ieee80211_set_hardware_ciphers(ic, hwciphers); } #endif lkpi_ic_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ieee80211_ifattach(ic); ic->ic_update_mcast = lkpi_ic_update_mcast; ic->ic_update_promisc = lkpi_ic_update_promisc; ic->ic_update_chw = lkpi_ic_update_chw; ic->ic_parent = lkpi_ic_parent; ic->ic_scan_start = lkpi_ic_scan_start; ic->ic_scan_end = lkpi_ic_scan_end; ic->ic_set_channel = lkpi_ic_set_channel; ic->ic_transmit = lkpi_ic_transmit; ic->ic_raw_xmit = lkpi_ic_raw_xmit; ic->ic_vap_create = lkpi_ic_vap_create; ic->ic_vap_delete = lkpi_ic_vap_delete; ic->ic_getradiocaps = lkpi_ic_getradiocaps; ic->ic_wme.wme_update = lkpi_ic_wme_update; lhw->ic_scan_curchan = ic->ic_scan_curchan; ic->ic_scan_curchan = lkpi_ic_scan_curchan; lhw->ic_scan_mindwell = ic->ic_scan_mindwell; ic->ic_scan_mindwell = lkpi_ic_scan_mindwell; lhw->ic_node_alloc = ic->ic_node_alloc; ic->ic_node_alloc = lkpi_ic_node_alloc; lhw->ic_node_init = ic->ic_node_init; ic->ic_node_init = lkpi_ic_node_init; lhw->ic_node_cleanup = ic->ic_node_cleanup; ic->ic_node_cleanup = lkpi_ic_node_cleanup; lhw->ic_node_free = ic->ic_node_free; ic->ic_node_free = lkpi_ic_node_free; #ifdef LKPI_80211_HT /* * Only attach if the driver/firmware supports (*ampdu_action)(). * Otherwise it is in the hands of net80211. 
*/ if (lhw->ops->ampdu_action != NULL) { lhw->ic_recv_action = ic->ic_recv_action; ic->ic_recv_action = lkpi_ic_recv_action; lhw->ic_send_action = ic->ic_send_action; ic->ic_send_action = lkpi_ic_send_action; lhw->ic_ampdu_enable = ic->ic_ampdu_enable; ic->ic_ampdu_enable = lkpi_ic_ampdu_enable; lhw->ic_addba_request = ic->ic_addba_request; ic->ic_addba_request = lkpi_ic_addba_request; lhw->ic_addba_response = ic->ic_addba_response; ic->ic_addba_response = lkpi_ic_addba_response; lhw->ic_addba_stop = ic->ic_addba_stop; ic->ic_addba_stop = lkpi_ic_addba_stop; lhw->ic_addba_response_timeout = ic->ic_addba_response_timeout; ic->ic_addba_response_timeout = lkpi_ic_addba_response_timeout; lhw->ic_bar_response = ic->ic_bar_response; ic->ic_bar_response = lkpi_ic_bar_response; lhw->ic_ampdu_rx_start = ic->ic_ampdu_rx_start; ic->ic_ampdu_rx_start = lkpi_ic_ampdu_rx_start; lhw->ic_ampdu_rx_stop = ic->ic_ampdu_rx_stop; ic->ic_ampdu_rx_stop = lkpi_ic_ampdu_rx_stop; } #endif lkpi_radiotap_attach(lhw); /* * Assign the first possible channel for now; seems Realtek drivers * expect one. * Also remember the number of bands we support and the most rates * in any band so we can scale [(ext) sup rates] IE(s) accordingly. */ lhw->supbands = lhw->max_rates = 0; for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *supband; struct linuxkpi_ieee80211_channel *channels; supband = hw->wiphy->bands[band]; if (supband == NULL || supband->n_channels == 0) continue; lhw->supbands++; lhw->max_rates = max(lhw->max_rates, supband->n_bitrates); /* If we have a channel, we need to keep counting supbands. */ if (hw->conf.chandef.chan != NULL) continue; channels = supband->channels; for (i = 0; i < supband->n_channels; i++) { if (channels[i].flags & IEEE80211_CHAN_DISABLED) continue; cfg80211_chandef_create(&hw->conf.chandef, &channels[i], #ifdef LKPI_80211_HT (ic->ic_flags_ht & IEEE80211_FHT_HT) ? NL80211_CHAN_HT20 : #endif NL80211_CHAN_NO_HT); break; } } IMPROVE("see net80211::ieee80211_chan_init vs. wiphy->bands[].bitrates possibly in lkpi_ic_getradiocaps?"); /* Make sure we do not support more than net80211 is willing to take. */ if (lhw->max_rates > IEEE80211_RATE_MAXSIZE) { ic_printf(ic, "%s: limiting max_rates %d to %d!\n", __func__, lhw->max_rates, IEEE80211_RATE_MAXSIZE); lhw->max_rates = IEEE80211_RATE_MAXSIZE; } /* * The maximum number of supported bitrates on any band plus the size * of the DSSS Parameter Set gives our per-band IE size. * SSID is the responsibility of the driver and goes on the side. * The user specified bits coming from the vap go into the * "common ies" fields. */ lhw->scan_ie_len = 2 + IEEE80211_RATE_SIZE; if (lhw->max_rates > IEEE80211_RATE_SIZE) lhw->scan_ie_len += 2 + (lhw->max_rates - IEEE80211_RATE_SIZE); if (hw->wiphy->features & NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) { /* * net80211 does not seem to support the DSSS Parameter Set but * some of the drivers insert it, so account for the extra fixed * space. */ lhw->scan_ie_len += 2 + 1; } #if defined(LKPI_80211_HT) if ((ic->ic_htcaps & IEEE80211_HTC_HT) != 0) lhw->scan_ie_len += sizeof(struct ieee80211_ie_htcap); #endif #if defined(LKPI_80211_VHT) if (IEEE80211_CONF_VHT(ic)) lhw->scan_ie_len += 2 + sizeof(struct ieee80211_vht_cap); #endif /* Reduce the max_scan_ie_len "left" by the amount we consume already. 
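 * A worked example with assumed numbers (ignoring the HT/VHT IEs):
 * for max_rates == 24 with DS params advertised the fixed part is
 *	2 + 8		(supported rates)
 *	2 + 16		(extended supported rates)
 *	2 + 1		(DSSS parameter set)
 * i.e. scan_ie_len == 31, and only the remainder of max_scan_ie_len
 * stays available for the vap's common IEs.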
*/ if (hw->wiphy->max_scan_ie_len > 0) { if (lhw->scan_ie_len > hw->wiphy->max_scan_ie_len) goto err; hw->wiphy->max_scan_ie_len -= lhw->scan_ie_len; } if (bootverbose) ieee80211_announce(ic); return (0); err: IMPROVE("TODO FIXME CLEANUP"); return (-EAGAIN); } void linuxkpi_ieee80211_ifdetach(struct ieee80211_hw *hw) { struct lkpi_hw *lhw; struct ieee80211com *ic; lhw = HW_TO_LHW(hw); ic = lhw->ic; ieee80211_ifdetach(ic); } void linuxkpi_ieee80211_iterate_interfaces(struct ieee80211_hw *hw, enum ieee80211_iface_iter flags, void(*iterfunc)(void *, uint8_t *, struct ieee80211_vif *), void *arg) { struct lkpi_hw *lhw; struct lkpi_vif *lvif; struct ieee80211_vif *vif; bool active, atomic, nin_drv; lhw = HW_TO_LHW(hw); if (flags & ~(IEEE80211_IFACE_ITER_NORMAL| IEEE80211_IFACE_ITER_RESUME_ALL| IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER| IEEE80211_IFACE_ITER_ACTIVE|IEEE80211_IFACE_ITER__ATOMIC)) { ic_printf(lhw->ic, "XXX TODO %s flags(%#x) not yet supported.\n", __func__, flags); } active = (flags & IEEE80211_IFACE_ITER_ACTIVE) != 0; atomic = (flags & IEEE80211_IFACE_ITER__ATOMIC) != 0; nin_drv = (flags & IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER) != 0; if (atomic) LKPI_80211_LHW_LVIF_LOCK(lhw); TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) { struct ieee80211vap *vap; vif = LVIF_TO_VIF(lvif); /* * If we want "active" interfaces, we need to distinguish on * whether the driver knows about them or not to be able to * handle the "resume" case correctly. Skip the ones the * driver does not know about. */ if (active && !lvif->added_to_drv && (flags & IEEE80211_IFACE_ITER_RESUME_ALL) != 0) continue; /* * If we shall skip interfaces not added to the driver, do so * if we haven't yet. */ if (nin_drv && !lvif->added_to_drv) continue; /* * Run the iterator function if we are either not asking * for active only or if the VAP is "running". */ /* XXX-BZ probably should have state in the lvif as well. */ vap = LVIF_TO_VAP(lvif); if (!active || (vap->iv_state != IEEE80211_S_INIT)) iterfunc(arg, vif->addr, vif); } if (atomic) LKPI_80211_LHW_LVIF_UNLOCK(lhw); } static void lkpi_ieee80211_iterate_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ieee80211_keyix keyix, struct lkpi_sta *lsta, void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *, void *), void *arg) { if (!lsta->added_to_drv) return; if (lsta->kc[keyix] == NULL) return; iterfunc(hw, vif, LSTA_TO_STA(lsta), lsta->kc[keyix], arg); } void linuxkpi_ieee80211_iterate_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *, void *), void *arg, bool rcu) { struct lkpi_sta *lsta; struct lkpi_vif *lvif; lvif = VIF_TO_LVIF(vif); if (rcu) { rcu_read_lock_held(); /* XXX-BZ is this correct? 
*/ if (vif == NULL) { TODO(); } else { list_for_each_entry_rcu(lsta, &lvif->lsta_list, lsta_list) { for (ieee80211_keyix keyix = 0; keyix < nitems(lsta->kc); keyix++) lkpi_ieee80211_iterate_keys(hw, vif, keyix, lsta, iterfunc, arg); } } } else { TODO("Used by suspend/resume; order of keys as installed to " "firmware is important; we'll need to rewrite some code for that"); lockdep_assert_wiphy(hw->wiphy); if (vif == NULL) { TODO(); } else { list_for_each_entry(lsta, &lvif->lsta_list, lsta_list) { for (ieee80211_keyix keyix = 0; keyix < nitems(lsta->kc); keyix++) lkpi_ieee80211_iterate_keys(hw, vif, keyix, lsta, iterfunc, arg); } } } } void linuxkpi_ieee80211_iterate_chan_contexts(struct ieee80211_hw *hw, void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *, void *), void *arg) { struct lkpi_hw *lhw; struct lkpi_chanctx *lchanctx; KASSERT(hw != NULL && iterfunc != NULL, ("%s: hw %p iterfunc %p arg %p\n", __func__, hw, iterfunc, arg)); lhw = HW_TO_LHW(hw); rcu_read_lock(); list_for_each_entry_rcu(lchanctx, &lhw->lchanctx_list, entry) { if (!lchanctx->added_to_drv) continue; iterfunc(hw, &lchanctx->chanctx_conf, arg); } rcu_read_unlock(); } void linuxkpi_ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw, void (*iterfunc)(void *, struct ieee80211_sta *), void *arg) { struct lkpi_hw *lhw; struct lkpi_vif *lvif; struct lkpi_sta *lsta; struct ieee80211_sta *sta; KASSERT(hw != NULL && iterfunc != NULL, ("%s: hw %p iterfunc %p arg %p\n", __func__, hw, iterfunc, arg)); lhw = HW_TO_LHW(hw); LKPI_80211_LHW_LVIF_LOCK(lhw); TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) { rcu_read_lock(); list_for_each_entry_rcu(lsta, &lvif->lsta_list, lsta_list) { if (!lsta->added_to_drv) continue; sta = LSTA_TO_STA(lsta); iterfunc(arg, sta); } rcu_read_unlock(); } LKPI_80211_LHW_LVIF_UNLOCK(lhw); } struct linuxkpi_ieee80211_regdomain * lkpi_get_linuxkpi_ieee80211_regdomain(size_t n) { struct linuxkpi_ieee80211_regdomain *regd; regd = kzalloc(sizeof(*regd) + n * sizeof(struct ieee80211_reg_rule), GFP_KERNEL); return (regd); } int linuxkpi_regulatory_set_wiphy_regd_sync(struct wiphy *wiphy, struct linuxkpi_ieee80211_regdomain *regd) { struct lkpi_hw *lhw; struct ieee80211com *ic; struct ieee80211_regdomain *rd; lhw = wiphy_priv(wiphy); ic = lhw->ic; rd = &ic->ic_regdomain; if (rd->isocc[0] == '\0') { rd->isocc[0] = regd->alpha2[0]; rd->isocc[1] = regd->alpha2[1]; } TODO(); /* XXX-BZ finish the rest. */ return (0); } void linuxkpi_ieee80211_scan_completed(struct ieee80211_hw *hw, struct cfg80211_scan_info *info) { struct lkpi_hw *lhw; struct ieee80211com *ic; struct ieee80211_scan_state *ss; lhw = wiphy_priv(hw->wiphy); ic = lhw->ic; ss = ic->ic_scan; ieee80211_scan_done(ss->ss_vap); LKPI_80211_LHW_SCAN_LOCK(lhw); free(lhw->hw_req, M_LKPI80211); lhw->hw_req = NULL; lhw->scan_flags &= ~LKPI_LHW_SCAN_RUNNING; wakeup(lhw); LKPI_80211_LHW_SCAN_UNLOCK(lhw); return; } static void lkpi_80211_lhw_rxq_rx_one(struct lkpi_hw *lhw, struct mbuf *m) { struct ieee80211_node *ni; #ifdef LKPI_80211_USE_MTAG struct m_tag *mtag; #endif int ok; ni = NULL; #ifdef LKPI_80211_USE_MTAG mtag = m_tag_locate(m, MTAG_ABI_LKPI80211, LKPI80211_TAG_RXNI, NULL); if (mtag != NULL) { struct lkpi_80211_tag_rxni *rxni; rxni = (struct lkpi_80211_tag_rxni *)(mtag + 1); ni = rxni->ni; } #else if (m->m_pkthdr.PH_loc.ptr != NULL) { ni = m->m_pkthdr.PH_loc.ptr; m->m_pkthdr.PH_loc.ptr = NULL; } #endif if (ni != NULL) { ok = ieee80211_input_mimo(ni, m); ieee80211_free_node(ni); /* Release the reference. 
*/ if (ok < 0) m_freem(m); } else { ok = ieee80211_input_mimo_all(lhw->ic, m); /* mbuf got consumed. */ } #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_RX) printf("TRACE-RX: %s: handled frame type %#0x\n", __func__, ok); #endif } static void lkpi_80211_lhw_rxq_task(void *ctx, int pending) { struct lkpi_hw *lhw; struct mbufq mq; struct mbuf *m; lhw = ctx; #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_RX) printf("TRACE-RX: %s: lhw %p pending %d mbuf_qlen %d\n", __func__, lhw, pending, mbufq_len(&lhw->rxq)); #endif mbufq_init(&mq, IFQ_MAXLEN); LKPI_80211_LHW_RXQ_LOCK(lhw); mbufq_concat(&mq, &lhw->rxq); LKPI_80211_LHW_RXQ_UNLOCK(lhw); m = mbufq_dequeue(&mq); while (m != NULL) { lkpi_80211_lhw_rxq_rx_one(lhw, m); m = mbufq_dequeue(&mq); } } static void lkpi_convert_rx_status(struct ieee80211_hw *hw, struct lkpi_sta *lsta, struct ieee80211_rx_status *rx_status, struct ieee80211_rx_stats *rx_stats, uint8_t *rssip) { struct ieee80211_supported_band *supband; struct rate_info rxrate; int i; uint8_t rssi; memset(&rxrate, 0, sizeof(rxrate)); memset(rx_stats, 0, sizeof(*rx_stats)); rx_stats->r_flags = IEEE80211_R_NF | IEEE80211_R_RSSI; /* XXX-BZ correct hardcoded noise floor, survey data? */ rx_stats->c_nf = -96; if (ieee80211_hw_check(hw, SIGNAL_DBM) && !(rx_status->flag & RX_FLAG_NO_SIGNAL_VAL)) rssi = rx_status->signal; else rssi = rx_stats->c_nf; /* * net80211 signal strength data are in .5 dBm units relative to * the current noise floor (see comment in ieee80211_node.h). */ rssi -= rx_stats->c_nf; if (rssip != NULL) *rssip = rssi; rx_stats->c_rssi = rssi * 2; rx_stats->r_flags |= IEEE80211_R_BAND; rx_stats->c_band = lkpi_nl80211_band_to_net80211_band(rx_status->band); rx_stats->r_flags |= IEEE80211_R_FREQ | IEEE80211_R_IEEE; rx_stats->c_freq = rx_status->freq; rx_stats->c_ieee = ieee80211_mhz2ieee(rx_stats->c_freq, rx_stats->c_band); rx_stats->c_rx_tsf = rx_status->mactime; /* XXX RX_FLAG_MACTIME_IS_RTAP_TS64 ? */ if ((rx_status->flag & RX_FLAG_MACTIME) == (RX_FLAG_MACTIME_START|RX_FLAG_MACTIME_END)) { rx_stats->r_flags |= IEEE80211_R_TSF64; /* XXX RX_FLAG_MACTIME_PLCP_START ? */ if ((rx_status->flag & RX_FLAG_MACTIME) == RX_FLAG_MACTIME_START) rx_stats->r_flags |= IEEE80211_R_TSF_START; if ((rx_status->flag & RX_FLAG_MACTIME) == RX_FLAG_MACTIME_END) rx_stats->r_flags |= IEEE80211_R_TSF_END; /* XXX-BZ if TSF_END will net80211 do the unwind of time? */ } if (rx_status->chains != 0) { int cc; int8_t crssi; rx_stats->c_chain = rx_status->chains; rx_stats->r_flags |= IEEE80211_R_C_CHAIN; cc = 0; for (i = 0; i < nitems(rx_status->chain_signal); i++) { if (!(rx_status->chains & BIT(i))) continue; crssi = rx_status->chain_signal[i]; crssi -= rx_stats->c_nf; rx_stats->c_rssi_ctl[i] = crssi * 2; rx_stats->c_rssi_ext[i] = crssi * 2; /* XXX _ext ??? ATH thing? */ /* We currently only have the global noise floor value. */ rx_stats->c_nf_ctl[i] = rx_stats->c_nf; rx_stats->c_nf_ext[i] = rx_stats->c_nf; cc++; } if (cc > 0) rx_stats->r_flags |= (IEEE80211_R_C_NF | IEEE80211_R_C_RSSI); } /* XXX-NET80211 We are not going to populate c_phytype! */ switch (rx_status->encoding) { case RX_ENC_LEGACY: { uint32_t legacy = 0; supband = hw->wiphy->bands[rx_status->band]; if (supband != NULL) legacy = supband->bitrates[rx_status->rate_idx].bitrate; rx_stats->c_rate = legacy; rxrate.legacy = legacy; /* Is there a LinuxKPI way of reporting IEEE80211_RX_F_CCK / _OFDM? 
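 * For reference: the bitrate table is in 100kbit/s units, so a
 * 54Mbit/s legacy frame surfaces as
 *	rxrate.legacy == 540
 * and the modulation would have to be derived from the rate itself.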
*/ break; } case RX_ENC_HT: rx_stats->c_pktflags |= IEEE80211_RX_F_HT; rx_stats->c_rate = rx_status->rate_idx; /* mcs */ rxrate.flags |= RATE_INFO_FLAGS_MCS; rxrate.mcs = rx_status->rate_idx; if ((rx_status->enc_flags & RX_ENC_FLAG_SHORT_GI) != 0) { rx_stats->c_pktflags |= IEEE80211_RX_F_SHORTGI; rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI; } break; case RX_ENC_VHT: rx_stats->c_pktflags |= IEEE80211_RX_F_VHT; rx_stats->c_rate = rx_status->rate_idx; /* mcs */ rx_stats->c_vhtnss = rx_status->nss; rxrate.flags |= RATE_INFO_FLAGS_VHT_MCS; rxrate.mcs = rx_status->rate_idx; rxrate.nss = rx_status->nss; if ((rx_status->enc_flags & RX_ENC_FLAG_SHORT_GI) != 0) { rx_stats->c_pktflags |= IEEE80211_RX_F_SHORTGI; rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI; } break; case RX_ENC_HE: rxrate.flags |= RATE_INFO_FLAGS_HE_MCS; rxrate.mcs = rx_status->rate_idx; rxrate.nss = rx_status->nss; /* XXX TODO */ TODO("net80211 has no matching encoding for %u", rx_status->encoding); break; case RX_ENC_EHT: rxrate.flags |= RATE_INFO_FLAGS_EHT_MCS; rxrate.mcs = rx_status->rate_idx; rxrate.nss = rx_status->nss; /* XXX TODO */ TODO("net80211 has no matching encoding for %u", rx_status->encoding); break; } rxrate.bw = rx_status->bw; switch (rx_status->bw) { case RATE_INFO_BW_20: rx_stats->c_width = IEEE80211_RX_FW_20MHZ; break; case RATE_INFO_BW_40: rx_stats->c_width = IEEE80211_RX_FW_40MHZ; break; case RATE_INFO_BW_80: rx_stats->c_width = IEEE80211_RX_FW_80MHZ; break; case RATE_INFO_BW_160: rx_stats->c_width = IEEE80211_RX_FW_160MHZ; break; case RATE_INFO_BW_320: case RATE_INFO_BW_HE_RU: case RATE_INFO_BW_EHT_RU: case RATE_INFO_BW_5: case RATE_INFO_BW_10: TODO("net80211 has no matching bandwidth for %u", rx_status->bw); break; } if ((rx_status->enc_flags & RX_ENC_FLAG_LDPC) != 0) rx_stats->c_pktflags |= IEEE80211_RX_F_LDPC; if ((rx_status->enc_flags & RX_ENC_FLAG_STBC_MASK) != 0) rx_stats->c_pktflags |= IEEE80211_RX_F_STBC; /* * We only need these for LKPI_80211_HW_CRYPTO in theory but in * case the hardware does something we do not expect always leave * these enabled. Leaving this comment as documentation for the || 1. */ #if defined(LKPI_80211_HW_CRYPTO) || 1 if (rx_status->flag & RX_FLAG_DECRYPTED) { rx_stats->c_pktflags |= IEEE80211_RX_F_DECRYPTED; /* Only valid if decrypted is set. */ if (rx_status->flag & RX_FLAG_PN_VALIDATED) rx_stats->c_pktflags |= IEEE80211_RX_F_PN_VALIDATED; } if (rx_status->flag & RX_FLAG_IV_STRIPPED) rx_stats->c_pktflags |= IEEE80211_RX_F_IV_STRIP; if (rx_status->flag & RX_FLAG_ICV_STRIPPED) rx_stats->c_pktflags |= IEEE80211_RX_F_ICV_STRIP; if (rx_status->flag & RX_FLAG_MIC_STRIPPED) rx_stats->c_pktflags |= IEEE80211_RX_F_MIC_STRIP; if (rx_status->flag & RX_FLAG_MMIC_STRIPPED) rx_stats->c_pktflags |= IEEE80211_RX_F_MMIC_STRIP; if (rx_status->flag & RX_FLAG_MMIC_ERROR) rx_stats->c_pktflags |= IEEE80211_RX_F_FAIL_MMIC; if (rx_status->flag & RX_FLAG_FAILED_FCS_CRC) rx_stats->c_pktflags |= IEEE80211_RX_F_FAIL_FCSCRC; #endif if (lsta != NULL) { memcpy(&lsta->sinfo.rxrate, &rxrate, sizeof(rxrate)); lsta->sinfo.filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); } } /* For %list see comment towards the end of the function. */
*/ void linuxkpi_ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_sta *sta, struct napi_struct *napi __unused, struct list_head *list __unused) { struct lkpi_hw *lhw; struct ieee80211com *ic; struct mbuf *m; struct skb_shared_info *shinfo; struct ieee80211_rx_status *rx_status; struct ieee80211_rx_stats rx_stats; struct ieee80211_node *ni; struct ieee80211vap *vap; struct ieee80211_hdr *hdr; struct lkpi_sta *lsta; int i, offset, ok, error; uint8_t rssi; bool is_beacon; lhw = HW_TO_LHW(hw); ic = lhw->ic; if (skb->len < 2) { /* Need 80211 stats here. */ counter_u64_add(ic->ic_ierrors, 1); IMPROVE(); goto err; } /* * For now do the data copy; we can later improve things. Might even * have an mbuf backing the skb data then? */ m = m_get2(skb->len, M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { counter_u64_add(ic->ic_ierrors, 1); goto err; } m_copyback(m, 0, skb->tail - skb->data, skb->data); shinfo = skb_shinfo(skb); offset = m->m_len; for (i = 0; i < shinfo->nr_frags; i++) { m_copyback(m, offset, shinfo->frags[i].size, (uint8_t *)linux_page_address(shinfo->frags[i].page) + shinfo->frags[i].offset); offset += shinfo->frags[i].size; } rx_status = IEEE80211_SKB_RXCB(skb); hdr = (void *)skb->data; is_beacon = ieee80211_is_beacon(hdr->frame_control); #ifdef LINUXKPI_DEBUG_80211 if (is_beacon && (linuxkpi_debug_80211 & D80211_TRACE_RX_BEACONS) == 0) goto no_trace_beacons; if (linuxkpi_debug_80211 & D80211_TRACE_RX) printf("TRACE-RX: %s: skb %p l/d/t-len (%u/%u/%u) " "h %p d %p t %p e %p sh %p (%u) m %p plen %u len %u%s\n", __func__, skb, skb->len, skb->data_len, skb->truesize, skb->head, skb->data, skb->tail, skb->end, shinfo, shinfo->nr_frags, m, m->m_pkthdr.len, m->m_len, is_beacon ? " beacon" : ""); if (linuxkpi_debug_80211 & D80211_TRACE_RX_DUMP) hexdump(mtod(m, const void *), m->m_len, "RX (raw) ", 0); /* Implement a dump_rxcb() !!! */ if (linuxkpi_debug_80211 & D80211_TRACE_RX) printf("TRACE-RX: %s: RXCB: %ju %ju %u, %b, %u, %#0x, %#0x, " "%u band %u, %u { %d %d %d %d }, %d, %#x %#x %#x %#x %u %u %u\n", __func__, (uintmax_t)rx_status->boottime_ns, (uintmax_t)rx_status->mactime, rx_status->device_timestamp, rx_status->flag, IEEE80211_RX_STATUS_FLAGS_BITS, rx_status->freq, rx_status->bw, rx_status->encoding, rx_status->ampdu_reference, rx_status->band, rx_status->chains, rx_status->chain_signal[0], rx_status->chain_signal[1], rx_status->chain_signal[2], rx_status->chain_signal[3], rx_status->signal, rx_status->enc_flags, rx_status->he_dcm, rx_status->he_gi, rx_status->he_ru, rx_status->zero_length_psdu_type, rx_status->nss, rx_status->rate_idx); no_trace_beacons: #endif lsta = NULL; if (sta != NULL) { lsta = STA_TO_LSTA(sta); ni = ieee80211_ref_node(lsta->ni); } else { struct ieee80211_frame_min *wh; wh = mtod(m, struct ieee80211_frame_min *); ni = ieee80211_find_rxnode(ic, wh); if (ni != NULL) lsta = ni->ni_drv_data; } rssi = 0; lkpi_convert_rx_status(hw, lsta, rx_status, &rx_stats, &rssi); ok = ieee80211_add_rx_params(m, &rx_stats); if (ok == 0) { m_freem(m); counter_u64_add(ic->ic_ierrors, 1); goto err; } if (ni != NULL) vap = ni->ni_vap; else /* * XXX-BZ can we improve this by looking at the frame hdr * or other meta-data passed up? */ vap = TAILQ_FIRST(&ic->ic_vaps); #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_RX) printf("TRACE-RX: %s: sta %p lsta %p state %d ni %p vap %p%s\n", __func__, sta, lsta, (lsta != NULL) ? lsta->state : -1, ni, vap, is_beacon ? 
" beacon" : ""); #endif if (ni != NULL && vap != NULL && is_beacon && rx_status->device_timestamp > 0 && m->m_pkthdr.len >= sizeof(struct ieee80211_frame)) { struct lkpi_vif *lvif; struct ieee80211_vif *vif; struct ieee80211_frame *wh; wh = mtod(m, struct ieee80211_frame *); if (!IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_bssid)) goto skip_device_ts; lvif = VAP_TO_LVIF(vap); vif = LVIF_TO_VIF(lvif); IMPROVE("TIMING_BEACON_ONLY?"); /* mac80211 specific (not net80211) so keep it here. */ vif->bss_conf.sync_device_ts = rx_status->device_timestamp; /* * net80211 should take care of the other information (sync_tsf, * sync_dtim_count) as otherwise we need to parse the beacon. */ skip_device_ts: ; } if (vap != NULL && vap->iv_state > IEEE80211_S_INIT && ieee80211_radiotap_active_vap(vap)) { struct lkpi_radiotap_rx_hdr *rtap; rtap = &lhw->rtap_rx; rtap->wr_tsft = rx_status->device_timestamp; rtap->wr_flags = 0; if (rx_status->enc_flags & RX_ENC_FLAG_SHORTPRE) rtap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; if (rx_status->enc_flags & RX_ENC_FLAG_SHORT_GI) rtap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI; #if 0 /* .. or it does not given we strip it below. */ if (ieee80211_hw_check(hw, RX_INCLUDES_FCS)) rtap->wr_flags |= IEEE80211_RADIOTAP_F_FCS; #endif if (rx_status->flag & RX_FLAG_FAILED_FCS_CRC) rtap->wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; rtap->wr_rate = 0; IMPROVE(); /* XXX TODO status->encoding / rate_index / bw */ rtap->wr_chan_freq = htole16(rx_stats.c_freq); if (ic->ic_curchan->ic_ieee == rx_stats.c_ieee) rtap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); rtap->wr_dbm_antsignal = rssi; rtap->wr_dbm_antnoise = rx_stats.c_nf; } if (ieee80211_hw_check(hw, RX_INCLUDES_FCS)) m_adj(m, -IEEE80211_CRC_LEN); #if 0 if (list != NULL) { /* * Normally this would be queued up and delivered by * netif_receive_skb_list(), napi_gro_receive(), or the like. * See mt76::mac80211.c as only current possible consumer. */ IMPROVE("we simply pass the packet to net80211 to deal with."); } #endif /* Attach meta-information to the mbuf for the deferred RX path. */ if (ni != NULL) { #ifdef LKPI_80211_USE_MTAG struct m_tag *mtag; struct lkpi_80211_tag_rxni *rxni; mtag = m_tag_alloc(MTAG_ABI_LKPI80211, LKPI80211_TAG_RXNI, sizeof(*rxni), IEEE80211_M_NOWAIT); if (mtag == NULL) { m_freem(m); counter_u64_add(ic->ic_ierrors, 1); goto err; } rxni = (struct lkpi_80211_tag_rxni *)(mtag + 1); rxni->ni = ni; /* We hold a reference. */ m_tag_prepend(m, mtag); #else m->m_pkthdr.PH_loc.ptr = ni; /* We hold a reference. 
*/ #endif } LKPI_80211_LHW_RXQ_LOCK(lhw); if (lhw->rxq_stopped) { LKPI_80211_LHW_RXQ_UNLOCK(lhw); m_freem(m); counter_u64_add(ic->ic_ierrors, 1); goto err; } error = mbufq_enqueue(&lhw->rxq, m); if (error != 0) { LKPI_80211_LHW_RXQ_UNLOCK(lhw); m_freem(m); counter_u64_add(ic->ic_ierrors, 1); #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_RX) ic_printf(ni->ni_ic, "%s: mbufq_enqueue failed: %d\n", __func__, error); #endif goto err; } taskqueue_enqueue(taskqueue_thread, &lhw->rxq_task); LKPI_80211_LHW_RXQ_UNLOCK(lhw); IMPROVE(); err: /* The skb is ours so we can free it :-) */ kfree_skb(skb); } uint8_t linuxkpi_ieee80211_get_tid(struct ieee80211_hdr *hdr, bool nonqos_ok) { const struct ieee80211_frame *wh; uint8_t tid; /* Linux seems to assume this is a QOS-Data-Frame */ KASSERT(nonqos_ok || ieee80211_is_data_qos(hdr->frame_control), ("%s: hdr %p fc %#06x not qos_data\n", __func__, hdr, hdr->frame_control)); wh = (const struct ieee80211_frame *)hdr; tid = ieee80211_gettid(wh); KASSERT(nonqos_ok || tid == (tid & IEEE80211_QOS_TID), ("%s: tid %u " "not expected (%u?)\n", __func__, tid, IEEE80211_NONQOS_TID)); return (tid); } /* -------------------------------------------------------------------------- */ static void lkpi_wiphy_work(struct work_struct *work) { struct lkpi_wiphy *lwiphy; struct wiphy *wiphy; struct wiphy_work *wk; lwiphy = container_of(work, struct lkpi_wiphy, wwk); wiphy = LWIPHY_TO_WIPHY(lwiphy); wiphy_lock(wiphy); LKPI_80211_LWIPHY_WORK_LOCK(lwiphy); wk = list_first_entry_or_null(&lwiphy->wwk_list, struct wiphy_work, entry); /* If there is nothing we do nothing. */ if (wk == NULL) { LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy); wiphy_unlock(wiphy); return; } list_del_init(&wk->entry); /* More work to do? */ if (!list_empty(&lwiphy->wwk_list)) schedule_work(work); LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy); /* Finally call the (*wiphy_work_fn)() function. */ wk->fn(wiphy, wk); wiphy_unlock(wiphy); } void linuxkpi_wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *wwk) { struct lkpi_wiphy *lwiphy; lwiphy = WIPHY_TO_LWIPHY(wiphy); LKPI_80211_LWIPHY_WORK_LOCK(lwiphy); /* Do not double-queue. */ if (list_empty(&wwk->entry)) list_add_tail(&wwk->entry, &lwiphy->wwk_list); LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy); /* * See how ieee80211_queue_work() work continues in Linux or if things * migrate here over time? * Use a system queue from linux/workqueue.h for now. */ queue_work(system_wq, &lwiphy->wwk); } void linuxkpi_wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *wwk) { struct lkpi_wiphy *lwiphy; lwiphy = WIPHY_TO_LWIPHY(wiphy); LKPI_80211_LWIPHY_WORK_LOCK(lwiphy); /* Only cancel if queued. */ if (!list_empty(&wwk->entry)) list_del_init(&wwk->entry); LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy); } void linuxkpi_wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *wwk) { struct lkpi_wiphy *lwiphy; struct wiphy_work *wk; lwiphy = WIPHY_TO_LWIPHY(wiphy); LKPI_80211_LWIPHY_WORK_LOCK(lwiphy); /* If wwk is unset, flush everything; called when wiphy is shut down. 
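 * In other words both call styles are valid (mywk being a
 * hypothetical caller-owned work item):
 *	linuxkpi_wiphy_work_flush(wiphy, &mywk);
 *	linuxkpi_wiphy_work_flush(wiphy, NULL);
 * where the former runs queued work up to and including mywk and
 * the latter drains the whole list.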
*/ if (wwk != NULL && list_empty(&wwk->entry)) { LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy); return; } while (!list_empty(&lwiphy->wwk_list)) { wk = list_first_entry(&lwiphy->wwk_list, struct wiphy_work, entry); list_del_init(&wk->entry); LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy); wk->fn(wiphy, wk); LKPI_80211_LWIPHY_WORK_LOCK(lwiphy); if (wk == wwk) break; } LKPI_80211_LWIPHY_WORK_UNLOCK(lwiphy); } void lkpi_wiphy_delayed_work_timer(struct timer_list *tl) { struct wiphy_delayed_work *wdwk; wdwk = from_timer(wdwk, tl, timer); wiphy_work_queue(wdwk->wiphy, &wdwk->work); } void linuxkpi_wiphy_delayed_work_queue(struct wiphy *wiphy, struct wiphy_delayed_work *wdwk, unsigned long delay) { if (delay == 0) { /* Run right away. */ del_timer(&wdwk->timer); wiphy_work_queue(wiphy, &wdwk->work); } else { wdwk->wiphy = wiphy; mod_timer(&wdwk->timer, jiffies + delay); } } void linuxkpi_wiphy_delayed_work_cancel(struct wiphy *wiphy, struct wiphy_delayed_work *wdwk) { del_timer_sync(&wdwk->timer); wiphy_work_cancel(wiphy, &wdwk->work); } /* -------------------------------------------------------------------------- */ struct wiphy * linuxkpi_wiphy_new(const struct cfg80211_ops *ops, size_t priv_len) { struct lkpi_wiphy *lwiphy; struct wiphy *wiphy; lwiphy = kzalloc(sizeof(*lwiphy) + priv_len, GFP_KERNEL); if (lwiphy == NULL) return (NULL); lwiphy->ops = ops; LKPI_80211_LWIPHY_WORK_LOCK_INIT(lwiphy); INIT_LIST_HEAD(&lwiphy->wwk_list); INIT_WORK(&lwiphy->wwk, lkpi_wiphy_work); wiphy = LWIPHY_TO_WIPHY(lwiphy); mutex_init(&wiphy->mtx); TODO(); return (wiphy); } void linuxkpi_wiphy_free(struct wiphy *wiphy) { struct lkpi_wiphy *lwiphy; if (wiphy == NULL) return; linuxkpi_wiphy_work_flush(wiphy, NULL); mutex_destroy(&wiphy->mtx); lwiphy = WIPHY_TO_LWIPHY(wiphy); LKPI_80211_LWIPHY_WORK_LOCK_DESTROY(lwiphy); kfree(lwiphy); } static uint32_t lkpi_cfg80211_calculate_bitrate_ht(struct rate_info *rate) { TODO("cfg80211_calculate_bitrate_ht"); return (rate->legacy); } static uint32_t lkpi_cfg80211_calculate_bitrate_vht(struct rate_info *rate) { TODO("cfg80211_calculate_bitrate_vht"); return (rate->legacy); } uint32_t linuxkpi_cfg80211_calculate_bitrate(struct rate_info *rate) { /* Beware: order! */ if (rate->flags & RATE_INFO_FLAGS_MCS) return (lkpi_cfg80211_calculate_bitrate_ht(rate)); if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) return (lkpi_cfg80211_calculate_bitrate_vht(rate)); IMPROVE("HE/EHT/..."); return (rate->legacy); } uint32_t linuxkpi_ieee80211_channel_to_frequency(uint32_t channel, enum nl80211_band band) { switch (band) { case NL80211_BAND_2GHZ: return (ieee80211_ieee2mhz(channel, IEEE80211_CHAN_2GHZ)); break; case NL80211_BAND_5GHZ: return (ieee80211_ieee2mhz(channel, IEEE80211_CHAN_5GHZ)); break; default: /* XXX abort, retry, error, panic? 
*/ break; } return (0); } uint32_t linuxkpi_ieee80211_frequency_to_channel(uint32_t freq, uint32_t flags __unused) { return (ieee80211_mhz2ieee(freq, 0)); } #if 0 static struct lkpi_sta * lkpi_find_lsta_by_ni(struct lkpi_vif *lvif, struct ieee80211_node *ni) { struct lkpi_sta *lsta, *temp; rcu_read_lock(); list_for_each_entry_rcu(lsta, &lvif->lsta_list, lsta_list) { if (lsta->ni == ni) { rcu_read_unlock(); return (lsta); } } rcu_read_unlock(); return (NULL); } #endif struct ieee80211_sta * linuxkpi_ieee80211_find_sta(struct ieee80211_vif *vif, const u8 *peer) { struct lkpi_vif *lvif; struct lkpi_sta *lsta; struct ieee80211_sta *sta; lvif = VIF_TO_LVIF(vif); rcu_read_lock(); list_for_each_entry_rcu(lsta, &lvif->lsta_list, lsta_list) { sta = LSTA_TO_STA(lsta); if (IEEE80211_ADDR_EQ(sta->addr, peer)) { rcu_read_unlock(); return (sta); } } rcu_read_unlock(); return (NULL); } struct ieee80211_sta * linuxkpi_ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, const uint8_t *addr, const uint8_t *ourvifaddr) { struct lkpi_hw *lhw; struct lkpi_vif *lvif; struct lkpi_sta *lsta; struct ieee80211_vif *vif; struct ieee80211_sta *sta; lhw = wiphy_priv(hw->wiphy); sta = NULL; LKPI_80211_LHW_LVIF_LOCK(lhw); TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) { /* XXX-BZ check our address from the vif. */ vif = LVIF_TO_VIF(lvif); if (ourvifaddr != NULL && !IEEE80211_ADDR_EQ(vif->addr, ourvifaddr)) continue; sta = linuxkpi_ieee80211_find_sta(vif, addr); if (sta != NULL) break; } LKPI_80211_LHW_LVIF_UNLOCK(lhw); if (sta != NULL) { lsta = STA_TO_LSTA(sta); if (!lsta->added_to_drv) return (NULL); } return (sta); } struct sk_buff * linuxkpi_ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct lkpi_txq *ltxq; struct lkpi_vif *lvif; struct sk_buff *skb; skb = NULL; ltxq = TXQ_TO_LTXQ(txq); ltxq->seen_dequeue = true; if (ltxq->stopped) goto stopped; lvif = VIF_TO_LVIF(ltxq->txq.vif); if (lvif->hw_queue_stopped[ltxq->txq.ac]) { ltxq->stopped = true; goto stopped; } IMPROVE("hw(TX_FRAG_LIST)"); LKPI_80211_LTXQ_LOCK(ltxq); skb = skb_dequeue(&ltxq->skbq); LKPI_80211_LTXQ_UNLOCK(ltxq); stopped: return (skb); } void linuxkpi_ieee80211_txq_get_depth(struct ieee80211_txq *txq, unsigned long *frame_cnt, unsigned long *byte_cnt) { struct lkpi_txq *ltxq; struct sk_buff *skb; unsigned long fc, bc; ltxq = TXQ_TO_LTXQ(txq); fc = bc = 0; LKPI_80211_LTXQ_LOCK(ltxq); skb_queue_walk(&ltxq->skbq, skb) { fc++; bc += skb->len; } LKPI_80211_LTXQ_UNLOCK(ltxq); if (frame_cnt) *frame_cnt = fc; if (byte_cnt) *byte_cnt = bc; /* Validate that this is doing the correct thing. */ /* Should we keep track on en/dequeue? */ IMPROVE(); } /* * We are called from ieee80211_free_txskb() or ieee80211_tx_status(). * The latter tries to derive the success status from the info flags * passed back from the driver. raw_xmit() saves the ni on the m and the * m on the skb for us to be able to give feedback to net80211. */ static void _lkpi_ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb, int status) { struct ieee80211_node *ni; struct mbuf *m; m = skb->m; skb->m = NULL; if (m != NULL) { ni = m->m_pkthdr.PH_loc.ptr; /* Status: 0 is ok, != 0 is error. */ ieee80211_tx_complete(ni, m, status); /* ni & mbuf were consumed. */
*/ } } void linuxkpi_ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb, int status) { _lkpi_ieee80211_free_txskb(hw, skb, status); kfree_skb(skb); } void linuxkpi_ieee80211_tx_status_ext(struct ieee80211_hw *hw, struct ieee80211_tx_status *txstat) { struct sk_buff *skb; struct ieee80211_tx_info *info; struct ieee80211_ratectl_tx_status txs; struct ieee80211_node *ni; int status; skb = txstat->skb; if (skb->m != NULL) { struct mbuf *m; m = skb->m; ni = m->m_pkthdr.PH_loc.ptr; memset(&txs, 0, sizeof(txs)); } else { ni = NULL; } info = txstat->info; if (info->flags & IEEE80211_TX_STAT_ACK) { status = 0; /* No error. */ txs.status = IEEE80211_RATECTL_TX_SUCCESS; } else { status = 1; txs.status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; } if (ni != NULL) { txs.pktlen = skb->len; txs.flags |= IEEE80211_RATECTL_STATUS_PKTLEN; if (info->status.rates[0].count > 1) { txs.long_retries = info->status.rates[0].count - 1; /* 1 + retries in drivers. */ txs.flags |= IEEE80211_RATECTL_STATUS_LONG_RETRY; } #if 0 /* Unused in net80211 currently. */ /* XXX-BZ convert check .flags for MCS/VHT/.. */ txs.final_rate = info->status.rates[0].idx; txs.flags |= IEEE80211_RATECTL_STATUS_FINAL_RATE; #endif if (info->status.flags & IEEE80211_TX_STATUS_ACK_SIGNAL_VALID) { txs.rssi = info->status.ack_signal; /* XXX-BZ CONVERT? */ txs.flags |= IEEE80211_RATECTL_STATUS_RSSI; } IMPROVE("only update rate if needed but that requires us to get a proper rate from mo_sta_statistics"); ieee80211_ratectl_tx_complete(ni, &txs); ieee80211_ratectl_rate(ni->ni_vap->iv_bss, NULL, 0); #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_TX) { printf("TX-RATE: %s: long_retries %d\n", __func__, txs.long_retries); } #endif } #ifdef LINUXKPI_DEBUG_80211 if (linuxkpi_debug_80211 & D80211_TRACE_TX) printf("TX-STATUS: %s: hw %p skb %p status %d : flags %#x " "band %u hw_queue %u tx_time_est %d : " "rates [ %u %u %#x, %u %u %#x, %u %u %#x, %u %u %#x ] " "ack_signal %u ampdu_ack_len %u ampdu_len %u antenna %u " "tx_time %u flags %#x " "status_driver_data [ %p %p ]\n", __func__, hw, skb, status, info->flags, info->band, info->hw_queue, info->tx_time_est, info->status.rates[0].idx, info->status.rates[0].count, info->status.rates[0].flags, info->status.rates[1].idx, info->status.rates[1].count, info->status.rates[1].flags, info->status.rates[2].idx, info->status.rates[2].count, info->status.rates[2].flags, info->status.rates[3].idx, info->status.rates[3].count, info->status.rates[3].flags, info->status.ack_signal, info->status.ampdu_ack_len, info->status.ampdu_len, info->status.antenna, info->status.tx_time, info->status.flags, info->status.status_driver_data[0], info->status.status_driver_data[1]); #endif if (txstat->free_list) { _lkpi_ieee80211_free_txskb(hw, skb, status); list_add_tail(&skb->list, txstat->free_list); } else { linuxkpi_ieee80211_free_txskb(hw, skb, status); } } void linuxkpi_ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) { struct ieee80211_tx_status status; memset(&status, 0, sizeof(status)); status.info = IEEE80211_SKB_CB(skb); status.skb = skb; /* sta, n_rates, rates, free_list? */ ieee80211_tx_status_ext(hw, &status); } /* * This is an internal bandaid for the moment for the way we glue * skbs and mbufs together for TX. Once we have skbs backed by * mbufs this should go away. * This is a public function but kept on the private KPI (lkpi_) * and is not exposed by a header file. 
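 */

/*
 * A hypothetical consumer-side sketch: drivers that stashed an mbuf on
 * an skb can hand it back through this function while tearing the skb
 * down separately.
 */
#if 0
	if (skb->m != NULL) {
		lkpi_ieee80211_free_skb_mbuf(skb->m);
		skb->m = NULL;
	}
#endif
/*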
*/ static void lkpi_ieee80211_free_skb_mbuf(void *p) { struct ieee80211_node *ni; struct mbuf *m; if (p == NULL) return; m = (struct mbuf *)p; M_ASSERTPKTHDR(m); ni = m->m_pkthdr.PH_loc.ptr; m->m_pkthdr.PH_loc.ptr = NULL; if (ni != NULL) ieee80211_free_node(ni); m_freem(m); } void linuxkpi_ieee80211_queue_delayed_work(struct ieee80211_hw *hw, struct delayed_work *w, int delay) { struct lkpi_hw *lhw; /* Need to make sure hw is in a stable (non-suspended) state. */ IMPROVE(); lhw = HW_TO_LHW(hw); queue_delayed_work(lhw->workq, w, delay); } void linuxkpi_ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *w) { struct lkpi_hw *lhw; /* Need to make sure hw is in a stable (non-suspended) state. */ IMPROVE(); lhw = HW_TO_LHW(hw); queue_work(lhw->workq, w); } struct sk_buff * linuxkpi_ieee80211_probereq_get(struct ieee80211_hw *hw, uint8_t *addr, uint8_t *ssid, size_t ssid_len, size_t tailroom) { struct sk_buff *skb; struct ieee80211_frame *wh; uint8_t *p; size_t len; len = sizeof(*wh); len += 2 + ssid_len; skb = dev_alloc_skb(hw->extra_tx_headroom + len + tailroom); if (skb == NULL) return (NULL); skb_reserve(skb, hw->extra_tx_headroom); wh = skb_put_zero(skb, sizeof(*wh)); wh->i_fc[0] = IEEE80211_FC0_VERSION_0; wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_PROBE_REQ | IEEE80211_FC0_TYPE_MGT; IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); IEEE80211_ADDR_COPY(wh->i_addr2, addr); IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr); p = skb_put(skb, 2 + ssid_len); *p++ = IEEE80211_ELEMID_SSID; *p++ = ssid_len; if (ssid_len > 0) memcpy(p, ssid, ssid_len); return (skb); } struct sk_buff * linuxkpi_ieee80211_pspoll_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct lkpi_vif *lvif; struct ieee80211vap *vap; struct sk_buff *skb; struct ieee80211_frame_pspoll *psp; uint16_t v; skb = dev_alloc_skb(hw->extra_tx_headroom + sizeof(*psp)); if (skb == NULL) return (NULL); skb_reserve(skb, hw->extra_tx_headroom); lvif = VIF_TO_LVIF(vif); vap = LVIF_TO_VAP(lvif); psp = skb_put_zero(skb, sizeof(*psp)); psp->i_fc[0] = IEEE80211_FC0_VERSION_0; psp->i_fc[0] |= IEEE80211_FC0_SUBTYPE_PS_POLL | IEEE80211_FC0_TYPE_CTL; v = htole16(vif->cfg.aid | 1<<15 | 1<<16); memcpy(&psp->i_aid, &v, sizeof(v)); IEEE80211_ADDR_COPY(psp->i_bssid, vap->iv_bss->ni_macaddr); IEEE80211_ADDR_COPY(psp->i_ta, vif->addr); return (skb); } struct sk_buff * linuxkpi_ieee80211_nullfunc_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int linkid, bool qos) { struct lkpi_vif *lvif; struct ieee80211vap *vap; struct sk_buff *skb; struct ieee80211_frame *nullf; IMPROVE("linkid"); skb = dev_alloc_skb(hw->extra_tx_headroom + sizeof(*nullf)); if (skb == NULL) return (NULL); skb_reserve(skb, hw->extra_tx_headroom); lvif = VIF_TO_LVIF(vif); vap = LVIF_TO_VAP(lvif); nullf = skb_put_zero(skb, sizeof(*nullf)); nullf->i_fc[0] = IEEE80211_FC0_VERSION_0; nullf->i_fc[0] |= IEEE80211_FC0_SUBTYPE_NODATA | IEEE80211_FC0_TYPE_DATA; nullf->i_fc[1] = IEEE80211_FC1_DIR_TODS; IEEE80211_ADDR_COPY(nullf->i_addr1, vap->iv_bss->ni_bssid); IEEE80211_ADDR_COPY(nullf->i_addr2, vif->addr); IEEE80211_ADDR_COPY(nullf->i_addr3, vap->iv_bss->ni_macaddr); return (skb); } struct wireless_dev * linuxkpi_ieee80211_vif_to_wdev(struct ieee80211_vif *vif) { struct lkpi_vif *lvif; lvif = VIF_TO_LVIF(vif); return (&lvif->wdev); } void linuxkpi_ieee80211_connection_loss(struct ieee80211_vif *vif) { struct lkpi_vif *lvif; struct ieee80211vap *vap; enum ieee80211_state nstate; int arg; lvif = VIF_TO_LVIF(vif); vap = LVIF_TO_VAP(lvif); /* * Go to init; 
	 * otherwise we need to elaborately check state and
	 * handle accordingly, e.g., if in RUN we could call iv_bmiss.
	 * Let the state machine handle all necessary changes.
	 */
	nstate = IEEE80211_S_INIT;
	arg = 0;	/* Not a valid reason. */

	ic_printf(vap->iv_ic, "%s: vif %p vap %p state %s\n", __func__,
	    vif, vap, ieee80211_state_name[vap->iv_state]);
	ieee80211_new_state(vap, nstate, arg);
}

void
linuxkpi_ieee80211_beacon_loss(struct ieee80211_vif *vif)
{
	struct lkpi_vif *lvif;
	struct ieee80211vap *vap;

	lvif = VIF_TO_LVIF(vif);
	vap = LVIF_TO_VAP(lvif);

	ic_printf(vap->iv_ic, "%s: vif %p vap %p state %s\n", __func__,
	    vif, vap, ieee80211_state_name[vap->iv_state]);
	ieee80211_beacon_miss(vap->iv_ic);
}

/* -------------------------------------------------------------------------- */

void
linuxkpi_ieee80211_stop_queue(struct ieee80211_hw *hw, int qnum)
{
	struct lkpi_hw *lhw;
	struct lkpi_vif *lvif;
	struct ieee80211_vif *vif;
	int ac_count, ac;

	KASSERT(qnum < hw->queues, ("%s: qnum %d >= hw->queues %d, hw %p\n",
	    __func__, qnum, hw->queues, hw));

	lhw = wiphy_priv(hw->wiphy);

	/* See lkpi_ic_vap_create(). */
	if (hw->queues >= IEEE80211_NUM_ACS)
		ac_count = IEEE80211_NUM_ACS;
	else
		ac_count = 1;

	LKPI_80211_LHW_LVIF_LOCK(lhw);
	TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) {
		vif = LVIF_TO_VIF(lvif);
		for (ac = 0; ac < ac_count; ac++) {
			IMPROVE_TXQ("LOCKING");
			if (qnum == vif->hw_queue[ac]) {
#ifdef LINUXKPI_DEBUG_80211
				/*
				 * For now log this to better understand
				 * how this is supposed to work.
				 */
				if (lvif->hw_queue_stopped[ac] &&
				    (linuxkpi_debug_80211 & D80211_IMPROVE_TXQ) != 0)
					ic_printf(lhw->ic, "%s:%d: lhw %p hw %p "
					    "lvif %p vif %p ac %d qnum %d already "
					    "stopped\n", __func__, __LINE__,
					    lhw, hw, lvif, vif, ac, qnum);
#endif
				lvif->hw_queue_stopped[ac] = true;
			}
		}
	}
	LKPI_80211_LHW_LVIF_UNLOCK(lhw);
}

void
linuxkpi_ieee80211_stop_queues(struct ieee80211_hw *hw)
{
	int i;

	IMPROVE_TXQ("Locking; do we need further info?");
	for (i = 0; i < hw->queues; i++)
		linuxkpi_ieee80211_stop_queue(hw, i);
}

static void
lkpi_ieee80211_wake_queues(struct ieee80211_hw *hw, int hwq)
{
	struct lkpi_hw *lhw;
	struct lkpi_vif *lvif;
	struct lkpi_sta *lsta;
	int ac_count, ac, tid;

	/* See lkpi_ic_vap_create(). */
	if (hw->queues >= IEEE80211_NUM_ACS)
		ac_count = IEEE80211_NUM_ACS;
	else
		ac_count = 1;

	lhw = wiphy_priv(hw->wiphy);

	IMPROVE_TXQ("Locking");
	LKPI_80211_LHW_LVIF_LOCK(lhw);
	TAILQ_FOREACH(lvif, &lhw->lvif_head, lvif_entry) {
		struct ieee80211_vif *vif;

		vif = LVIF_TO_VIF(lvif);
		for (ac = 0; ac < ac_count; ac++) {
			if (hwq == vif->hw_queue[ac]) {

				/* XXX-BZ what about software scan? */

#ifdef LINUXKPI_DEBUG_80211
				/*
				 * For now log this to better understand
				 * how this is supposed to work.
				 */
				if (!lvif->hw_queue_stopped[ac] &&
				    (linuxkpi_debug_80211 & D80211_IMPROVE_TXQ) != 0)
					ic_printf(lhw->ic, "%s:%d: lhw %p hw %p "
					    "lvif %p vif %p ac %d hw_q not stopped\n",
					    __func__, __LINE__,
					    lhw, hw, lvif, vif, ac);
#endif
				lvif->hw_queue_stopped[ac] = false;

				rcu_read_lock();
				list_for_each_entry_rcu(lsta, &lvif->lsta_list,
				    lsta_list) {
					struct ieee80211_sta *sta;

					sta = LSTA_TO_STA(lsta);
					for (tid = 0; tid < nitems(sta->txq); tid++) {
						struct lkpi_txq *ltxq;

						if (sta->txq[tid] == NULL)
							continue;

						if (sta->txq[tid]->ac != ac)
							continue;

						ltxq = TXQ_TO_LTXQ(sta->txq[tid]);
						if (!ltxq->stopped)
							continue;

						ltxq->stopped = false;

						/*
						 * XXX-BZ see when this explodes
						 * with all the locking. taskq?
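						 */

						/*
						 * A conceivable deferral
						 * (sketch; wake_wk would be a
						 * hypothetical work_struct
						 * member): hand the wake-up
						 * to the lhw workqueue rather
						 * than calling the driver op
						 * with our locks held.
						 */
#if 0
						queue_work(lhw->workq,
						    &ltxq->wake_wk);
#endif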
						lkpi_80211_mo_wake_tx_queue(hw,
						    sta->txq[tid]);
					}
				}
				rcu_read_unlock();
			}
		}
	}
	LKPI_80211_LHW_LVIF_UNLOCK(lhw);
}

void
linuxkpi_ieee80211_wake_queues(struct ieee80211_hw *hw)
{
	int i;

	IMPROVE_TXQ("Is this all/enough here?");
	for (i = 0; i < hw->queues; i++)
		lkpi_ieee80211_wake_queues(hw, i);
}

void
linuxkpi_ieee80211_wake_queue(struct ieee80211_hw *hw, int qnum)
{

	KASSERT(qnum < hw->queues, ("%s: qnum %d >= hw->queues %d, hw %p\n",
	    __func__, qnum, hw->queues, hw));

	lkpi_ieee80211_wake_queues(hw, qnum);
}

/* This is just hardware queues. */
void
linuxkpi_ieee80211_txq_schedule_start(struct ieee80211_hw *hw, uint8_t ac)
{
	struct lkpi_hw *lhw;

	lhw = HW_TO_LHW(hw);

	IMPROVE_TXQ("Are there reasons why we wouldn't schedule?");
	IMPROVE_TXQ("LOCKING");
	if (++lhw->txq_generation[ac] == 0)
		lhw->txq_generation[ac]++;
}

struct ieee80211_txq *
linuxkpi_ieee80211_next_txq(struct ieee80211_hw *hw, uint8_t ac)
{
	struct lkpi_hw *lhw;
	struct ieee80211_txq *txq;
	struct lkpi_txq *ltxq;

	lhw = HW_TO_LHW(hw);
	txq = NULL;

	IMPROVE_TXQ("LOCKING");

	/* Check that we are scheduled. */
	if (lhw->txq_generation[ac] == 0)
		goto out;

	ltxq = TAILQ_FIRST(&lhw->scheduled_txqs[ac]);
	if (ltxq == NULL)
		goto out;
	if (ltxq->txq_generation == lhw->txq_generation[ac])
		goto out;

	ltxq->txq_generation = lhw->txq_generation[ac];
	TAILQ_REMOVE(&lhw->scheduled_txqs[ac], ltxq, txq_entry);
	txq = &ltxq->txq;
	TAILQ_ELEM_INIT(ltxq, txq_entry);

out:
	return (txq);
}

void
linuxkpi_ieee80211_schedule_txq(struct ieee80211_hw *hw,
    struct ieee80211_txq *txq, bool withoutpkts)
{
	struct lkpi_hw *lhw;
	struct lkpi_txq *ltxq;
	bool ltxq_empty;

	ltxq = TXQ_TO_LTXQ(txq);

	IMPROVE_TXQ("LOCKING");

	/* Only schedule if work to do or asked to anyway. */
	LKPI_80211_LTXQ_LOCK(ltxq);
	ltxq_empty = skb_queue_empty(&ltxq->skbq);
	LKPI_80211_LTXQ_UNLOCK(ltxq);
	if (!withoutpkts && ltxq_empty)
		goto out;

	/*
	 * Make sure we do not double-schedule.  We do this by checking
	 * tqe_prev, the previous entry in our tailq.  tqe_prev is always
	 * valid if this entry is queued; tqe_next may be NULL if this is
	 * the only element in the list.
	 */
	if (ltxq->txq_entry.tqe_prev != NULL)
		goto out;

	lhw = HW_TO_LHW(hw);
	TAILQ_INSERT_TAIL(&lhw->scheduled_txqs[txq->ac], ltxq, txq_entry);
out:
	return;
}

void
linuxkpi_ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
    struct ieee80211_txq *txq)
{
	struct lkpi_hw *lhw;
	struct ieee80211_txq *ntxq;
	struct ieee80211_tx_control control;
	struct sk_buff *skb;

	lhw = HW_TO_LHW(hw);

	LKPI_80211_LHW_TXQ_LOCK(lhw);
	ieee80211_txq_schedule_start(hw, txq->ac);
	do {
		ntxq = ieee80211_next_txq(hw, txq->ac);
		if (ntxq == NULL)
			break;

		memset(&control, 0, sizeof(control));
		control.sta = ntxq->sta;
		do {
			skb = linuxkpi_ieee80211_tx_dequeue(hw, ntxq);
			if (skb == NULL)
				break;
			lkpi_80211_mo_tx(hw, &control, skb);
		} while (1);

		ieee80211_return_txq(hw, ntxq, false);
	} while (1);
	ieee80211_txq_schedule_end(hw, txq->ac);
	LKPI_80211_LHW_TXQ_UNLOCK(lhw);
}

/* -------------------------------------------------------------------------- */

struct lkpi_cfg80211_bss {
	u_int refcnt;
	struct cfg80211_bss bss;
};

struct lkpi_cfg80211_get_bss_iter_lookup {
	struct wiphy *wiphy;
	struct linuxkpi_ieee80211_channel *chan;
	const uint8_t *bssid;
	const uint8_t *ssid;
	size_t ssid_len;
	enum ieee80211_bss_type bss_type;
	enum ieee80211_privacy privacy;

	/*
	 * Something to store a copy of the result as the net80211 scan cache
	 * is not refcounted so a scan entry might go away any time.
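	 */

	/*
	 * Usage sketch with hypothetical locals: the copy made through this
	 * lookup is owned by the lkpi_cfg80211_bss refcount and is given
	 * back via linuxkpi_cfg80211_put_bss().
	 */
#if 0
	bss = linuxkpi_cfg80211_get_bss(wiphy, chan, bssid, ssid, ssid_len,
	    IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
	if (bss != NULL)
		linuxkpi_cfg80211_put_bss(wiphy, bss);
#endif
	/*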
*/ bool match; struct cfg80211_bss *bss; }; static void lkpi_cfg80211_get_bss_iterf(void *arg, const struct ieee80211_scan_entry *se) { struct lkpi_cfg80211_get_bss_iter_lookup *lookup; size_t ielen; lookup = arg; /* Do not try to find another match. */ if (lookup->match) return; /* Nothing to store result. */ if (lookup->bss == NULL) return; if (lookup->privacy != IEEE80211_PRIVACY_ANY) { /* if (se->se_capinfo & IEEE80211_CAPINFO_PRIVACY) */ /* We have no idea what to compare to as the drivers only request ANY */ return; } if (lookup->bss_type != IEEE80211_BSS_TYPE_ANY) { /* if (se->se_capinfo & (IEEE80211_CAPINFO_IBSS|IEEE80211_CAPINFO_ESS)) */ /* We have no idea what to compare to as the drivers only request ANY */ return; } if (lookup->chan != NULL) { struct linuxkpi_ieee80211_channel *chan; chan = linuxkpi_ieee80211_get_channel(lookup->wiphy, se->se_chan->ic_freq); if (chan == NULL || chan != lookup->chan) return; } if (lookup->bssid && !IEEE80211_ADDR_EQ(lookup->bssid, se->se_bssid)) return; if (lookup->ssid) { if (lookup->ssid_len != se->se_ssid[1] || se->se_ssid[1] == 0) return; if (memcmp(lookup->ssid, se->se_ssid+2, lookup->ssid_len) != 0) return; } ielen = se->se_ies.len; lookup->bss->ies = malloc(sizeof(*lookup->bss->ies) + ielen, M_LKPI80211, M_NOWAIT | M_ZERO); if (lookup->bss->ies == NULL) return; lookup->bss->ies->data = (uint8_t *)lookup->bss->ies + sizeof(*lookup->bss->ies); lookup->bss->ies->len = ielen; if (ielen) memcpy(lookup->bss->ies->data, se->se_ies.data, ielen); lookup->match = true; } struct cfg80211_bss * linuxkpi_cfg80211_get_bss(struct wiphy *wiphy, struct linuxkpi_ieee80211_channel *chan, const uint8_t *bssid, const uint8_t *ssid, size_t ssid_len, enum ieee80211_bss_type bss_type, enum ieee80211_privacy privacy) { struct lkpi_cfg80211_bss *lbss; struct lkpi_cfg80211_get_bss_iter_lookup lookup; struct lkpi_hw *lhw; struct ieee80211vap *vap; lhw = wiphy_priv(wiphy); /* Let's hope we can alloc. */ lbss = malloc(sizeof(*lbss), M_LKPI80211, M_NOWAIT | M_ZERO); if (lbss == NULL) { ic_printf(lhw->ic, "%s: alloc failed.\n", __func__); return (NULL); } lookup.wiphy = wiphy; lookup.chan = chan; lookup.bssid = bssid; lookup.ssid = ssid; lookup.ssid_len = ssid_len; lookup.bss_type = bss_type; lookup.privacy = privacy; lookup.match = false; lookup.bss = &lbss->bss; IMPROVE("Iterate over all VAPs comparing perm_addr and addresses?"); vap = TAILQ_FIRST(&lhw->ic->ic_vaps); ieee80211_scan_iterate(vap, lkpi_cfg80211_get_bss_iterf, &lookup); if (!lookup.match) { free(lbss, M_LKPI80211); return (NULL); } refcount_init(&lbss->refcnt, 1); return (&lbss->bss); } void linuxkpi_cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss) { struct lkpi_cfg80211_bss *lbss; lbss = container_of(bss, struct lkpi_cfg80211_bss, bss); /* Free everything again on refcount ... */ if (refcount_release(&lbss->refcnt)) { free(lbss->bss.ies, M_LKPI80211); free(lbss, M_LKPI80211); } } void linuxkpi_cfg80211_bss_flush(struct wiphy *wiphy) { struct lkpi_hw *lhw; struct ieee80211com *ic; struct ieee80211vap *vap; lhw = wiphy_priv(wiphy); ic = lhw->ic; /* * If we haven't called ieee80211_ifattach() yet * or there is no VAP, there are no scans to flush. */ if (ic == NULL || (lhw->sc_flags & LKPI_MAC80211_DRV_STARTED) == 0) return; /* Should only happen on the current one? Not seen it late enough. 
*/ IEEE80211_LOCK(ic); TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) ieee80211_scan_flush(vap); IEEE80211_UNLOCK(ic); } /* -------------------------------------------------------------------------- */ /* * hw->conf get initialized/set in various places for us: * - linuxkpi_ieee80211_alloc_hw(): flags * - linuxkpi_ieee80211_ifattach(): chandef * - lkpi_ic_vap_create(): listen_interval * - lkpi_ic_set_channel(): chandef, flags */ int lkpi_80211_update_chandef(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *new) { struct cfg80211_chan_def *cd; uint32_t changed; int error; changed = 0; if (new == NULL || new->def.chan == NULL) cd = NULL; else cd = &new->def; if (cd && cd->chan != hw->conf.chandef.chan) { /* Copy; the chan pointer is fine and will stay valid. */ hw->conf.chandef = *cd; changed |= IEEE80211_CONF_CHANGE_CHANNEL; } IMPROVE("IEEE80211_CONF_CHANGE_PS, IEEE80211_CONF_CHANGE_POWER"); if (changed == 0) return (0); error = lkpi_80211_mo_config(hw, changed); return (error); } /* -------------------------------------------------------------------------- */ MODULE_VERSION(linuxkpi_wlan, 1); MODULE_DEPEND(linuxkpi_wlan, linuxkpi, 1, 1, 1); MODULE_DEPEND(linuxkpi_wlan, wlan, 1, 1, 1); diff --git a/sys/compat/linuxkpi/common/src/linux_80211_macops.c b/sys/compat/linuxkpi/common/src/linux_80211_macops.c index d7bd26a3d0e3..b7e232da48b0 100644 --- a/sys/compat/linuxkpi/common/src/linux_80211_macops.c +++ b/sys/compat/linuxkpi/common/src/linux_80211_macops.c @@ -1,756 +1,756 @@ /*- * Copyright (c) 2021-2022 The FreeBSD Foundation * * This software was developed by Björn Zeeb under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #define LINUXKPI_NET80211 #include #include "linux_80211.h" /* Could be a different tracing framework later. */ #ifdef LINUXKPI_DEBUG_80211 #define LKPI_80211_TRACE_MO(fmt, ...) \ if (linuxkpi_debug_80211 & D80211_TRACE_MO) \ - printf("LKPI_80211_TRACE_MO %s:%d: %d %d %u_" fmt "\n", \ + printf("LKPI_80211_TRACE_MO %s:%d: %d %d %lu_" fmt "\n", \ __func__, __LINE__, curcpu, curthread->td_tid, \ - (unsigned int)ticks, __VA_ARGS__) + jiffies, __VA_ARGS__) #else #define LKPI_80211_TRACE_MO(...) 
do { } while(0) #endif int lkpi_80211_mo_start(struct ieee80211_hw *hw) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->start == NULL) { error = EOPNOTSUPP; goto out; } if ((lhw->sc_flags & LKPI_MAC80211_DRV_STARTED)) { /* Trying to start twice is an error. */ error = EEXIST; goto out; } LKPI_80211_TRACE_MO("hw %p", hw); error = lhw->ops->start(hw); if (error == 0) lhw->sc_flags |= LKPI_MAC80211_DRV_STARTED; out: return (error); } void lkpi_80211_mo_stop(struct ieee80211_hw *hw, bool suspend) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->stop == NULL) return; LKPI_80211_TRACE_MO("hw %p suspend %d", hw, suspend); lhw->ops->stop(hw, suspend); lhw->sc_flags &= ~LKPI_MAC80211_DRV_STARTED; } int lkpi_80211_mo_get_antenna(struct ieee80211_hw *hw, u32 *txs, u32 *rxs) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->get_antenna == NULL) { error = EOPNOTSUPP; goto out; } LKPI_80211_TRACE_MO("hw %p", hw); error = lhw->ops->get_antenna(hw, txs, rxs); out: return (error); } int lkpi_80211_mo_set_frag_threshold(struct ieee80211_hw *hw, uint32_t frag_th) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->set_frag_threshold == NULL) { error = EOPNOTSUPP; goto out; } LKPI_80211_TRACE_MO("hw %p frag_th %u", hw, frag_th); error = lhw->ops->set_frag_threshold(hw, frag_th); out: return (error); } int lkpi_80211_mo_set_rts_threshold(struct ieee80211_hw *hw, uint32_t rts_th) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->set_rts_threshold == NULL) { error = EOPNOTSUPP; goto out; } LKPI_80211_TRACE_MO("hw %p rts_th %u", hw, rts_th); error = lhw->ops->set_rts_threshold(hw, rts_th); out: return (error); } int lkpi_80211_mo_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct lkpi_hw *lhw; struct lkpi_vif *lvif; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->add_interface == NULL) { error = EOPNOTSUPP; goto out; } lvif = VIF_TO_LVIF(vif); LKPI_80211_LVIF_LOCK(lvif); if (lvif->added_to_drv) { LKPI_80211_LVIF_UNLOCK(lvif); /* Trying to add twice is an error. */ error = EEXIST; goto out; } LKPI_80211_LVIF_UNLOCK(lvif); LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif); error = lhw->ops->add_interface(hw, vif); if (error == 0) { LKPI_80211_LVIF_LOCK(lvif); lvif->added_to_drv = true; LKPI_80211_LVIF_UNLOCK(lvif); } out: return (error); } void lkpi_80211_mo_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct lkpi_hw *lhw; struct lkpi_vif *lvif; lhw = HW_TO_LHW(hw); if (lhw->ops->remove_interface == NULL) return; lvif = VIF_TO_LVIF(vif); LKPI_80211_LVIF_LOCK(lvif); if (!lvif->added_to_drv) { LKPI_80211_LVIF_UNLOCK(lvif); return; } LKPI_80211_LVIF_UNLOCK(lvif); LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif); lhw->ops->remove_interface(hw, vif); LKPI_80211_LVIF_LOCK(lvif); lvif->added_to_drv = false; LKPI_80211_LVIF_UNLOCK(lvif); } int lkpi_80211_mo_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *sr) { struct lkpi_hw *lhw; int error; /* * MUST NOT return EPERM as that is a "magic number 1" based on rtw88 * driver indicating hw_scan is not supported despite the ops call * being available. */ lhw = HW_TO_LHW(hw); if (lhw->ops->hw_scan == NULL) { /* Return magic number to use sw scan. 
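 */

		/*
		 * Caller-side contract (sketch): 1 selects net80211's
		 * software scan, 0 means the driver took the scan, and
		 * anything else is an error from the driver.
		 */
#if 0
		error = lkpi_80211_mo_hw_scan(hw, vif, sr);
		if (error == 1)
			/* ... fall back to the sw scan ... */;
		else if (error != 0)
			/* ... fail the scan request ... */;
#endif
		/*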
*/ error = 1; goto out; } LKPI_80211_TRACE_MO("CALLING hw %p vif %p sr %p", hw, vif, sr); error = lhw->ops->hw_scan(hw, vif, sr); LKPI_80211_TRACE_MO("RETURNING hw %p vif %p sr %p error %d", hw, vif, sr, error); out: return (error); } void lkpi_80211_mo_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->cancel_hw_scan == NULL) return; LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif); lhw->ops->cancel_hw_scan(hw, vif); } void lkpi_80211_mo_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->sw_scan_complete == NULL) return; LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif); lhw->ops->sw_scan_complete(hw, vif); lhw->scan_flags &= ~LKPI_LHW_SCAN_RUNNING; } void lkpi_80211_mo_sw_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *addr) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->sw_scan_start == NULL) return; LKPI_80211_TRACE_MO("hw %p vif %p", hw, vif); lhw->ops->sw_scan_start(hw, vif, addr); } /* * We keep the Linux type here; it really is an uintptr_t. */ u64 lkpi_80211_mo_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list) { struct lkpi_hw *lhw; u64 ptr; lhw = HW_TO_LHW(hw); if (lhw->ops->prepare_multicast == NULL) return (0); LKPI_80211_TRACE_MO("hw %p mc_list %p", hw, mc_list); ptr = lhw->ops->prepare_multicast(hw, mc_list); return (ptr); } void lkpi_80211_mo_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 mc_ptr) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->configure_filter == NULL) return; if (mc_ptr == 0) return; LKPI_80211_TRACE_MO("hw %p changed_flags %#x total_flags %p mc_ptr %ju", hw, changed_flags, total_flags, (uintmax_t)mc_ptr); lhw->ops->configure_filter(hw, changed_flags, total_flags, mc_ptr); } /* * So far we only called sta_{add,remove} as an alternative to sta_state. * Let's keep the implementation simpler and hide sta_{add,remove} under the * hood here calling them if state_state is not available from mo_sta_state. */ static int lkpi_80211_mo_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct lkpi_hw *lhw; struct lkpi_sta *lsta; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->sta_add == NULL) { error = EOPNOTSUPP; goto out; } lsta = STA_TO_LSTA(sta); if (lsta->added_to_drv) { error = EEXIST; goto out; } LKPI_80211_TRACE_MO("hw %p vif %p sta %p", hw, vif, sta); error = lhw->ops->sta_add(hw, vif, sta); if (error == 0) lsta->added_to_drv = true; out: return error; } static int lkpi_80211_mo_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct lkpi_hw *lhw; struct lkpi_sta *lsta; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->sta_remove == NULL) { error = EOPNOTSUPP; goto out; } lsta = STA_TO_LSTA(sta); if (!lsta->added_to_drv) { /* If we never added the sta, do not complain on cleanup. 
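 */

		/*
		 * The added_to_drv guard makes teardown idempotent (sketch):
		 * a second remove after a successful one is a no-op.
		 */
#if 0
		(void)lkpi_80211_mo_sta_remove(hw, vif, sta);	/* Removes. */
		(void)lkpi_80211_mo_sta_remove(hw, vif, sta);	/* No-op. */
#endif
		/*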
*/ error = 0; goto out; } LKPI_80211_TRACE_MO("hw %p vif %p sta %p", hw, vif, sta); error = lhw->ops->sta_remove(hw, vif, sta); if (error == 0) lsta->added_to_drv = false; out: return error; } int lkpi_80211_mo_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct lkpi_sta *lsta, enum ieee80211_sta_state nstate) { struct lkpi_hw *lhw; struct ieee80211_sta *sta; int error; lhw = HW_TO_LHW(hw); sta = LSTA_TO_STA(lsta); if (lhw->ops->sta_state != NULL) { LKPI_80211_TRACE_MO("hw %p vif %p sta %p nstate %d", hw, vif, sta, nstate); error = lhw->ops->sta_state(hw, vif, sta, lsta->state, nstate); if (error == 0) { if (nstate == IEEE80211_STA_NOTEXIST) lsta->added_to_drv = false; else lsta->added_to_drv = true; lsta->state = nstate; } goto out; } /* XXX-BZ is the change state AUTH or ASSOC here? */ if (lsta->state < IEEE80211_STA_ASSOC && nstate == IEEE80211_STA_ASSOC) { error = lkpi_80211_mo_sta_add(hw, vif, sta); if (error == 0) lsta->added_to_drv = true; } else if (lsta->state >= IEEE80211_STA_ASSOC && nstate < IEEE80211_STA_ASSOC) { error = lkpi_80211_mo_sta_remove(hw, vif, sta); if (error == 0) lsta->added_to_drv = false; } else /* Nothing to do. */ error = 0; if (error == 0) lsta->state = nstate; out: /* XXX-BZ should we manage state in here? */ return (error); } int lkpi_80211_mo_config(struct ieee80211_hw *hw, uint32_t changed) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->config == NULL) { error = EOPNOTSUPP; goto out; } LKPI_80211_TRACE_MO("hw %p changed %u", hw, changed); error = lhw->ops->config(hw, changed); out: return (error); } int lkpi_80211_mo_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf, struct ieee80211_chanctx_conf *chanctx_conf) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->assign_vif_chanctx == NULL) { error = EOPNOTSUPP; goto out; } LKPI_80211_TRACE_MO("hw %p vif %p bss_conf %p chanctx_conf %p", hw, vif, conf, chanctx_conf); error = lhw->ops->assign_vif_chanctx(hw, vif, conf, chanctx_conf); if (error == 0) vif->bss_conf.chanctx_conf = chanctx_conf; out: return (error); } void lkpi_80211_mo_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf, struct ieee80211_chanctx_conf *chanctx_conf) { struct lkpi_hw *lhw; might_sleep(); lockdep_assert_wiphy(hw->wiphy); lhw = HW_TO_LHW(hw); if (lhw->ops->unassign_vif_chanctx == NULL) return; if (chanctx_conf == NULL) return; LKPI_80211_TRACE_MO("hw %p vif %p bss_conf %p chanctx_conf %p", hw, vif, conf, chanctx_conf); lhw->ops->unassign_vif_chanctx(hw, vif, conf, chanctx_conf); } int lkpi_80211_mo_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *chanctx_conf) { struct lkpi_hw *lhw; struct lkpi_chanctx *lchanctx; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->add_chanctx == NULL) { error = EOPNOTSUPP; goto out; } LKPI_80211_TRACE_MO("hw %p chanctx_conf %p", hw, chanctx_conf); error = lhw->ops->add_chanctx(hw, chanctx_conf); if (error == 0) { lchanctx = CHANCTX_CONF_TO_LCHANCTX(chanctx_conf); lchanctx->added_to_drv = true; } out: return (error); } void lkpi_80211_mo_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *chanctx_conf, uint32_t changed) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->change_chanctx == NULL) return; LKPI_80211_TRACE_MO("hw %p chanctx_conf %p changed %u", hw, chanctx_conf, changed); lhw->ops->change_chanctx(hw, chanctx_conf, changed); } void lkpi_80211_mo_remove_chanctx(struct ieee80211_hw *hw, struct 
ieee80211_chanctx_conf *chanctx_conf) { struct lkpi_hw *lhw; struct lkpi_chanctx *lchanctx; lhw = HW_TO_LHW(hw); if (lhw->ops->remove_chanctx == NULL) return; LKPI_80211_TRACE_MO("hw %p chanctx_conf %p", hw, chanctx_conf); lhw->ops->remove_chanctx(hw, chanctx_conf); lchanctx = CHANCTX_CONF_TO_LCHANCTX(chanctx_conf); lchanctx->added_to_drv = false; } void lkpi_80211_mo_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf, uint64_t changed) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->link_info_changed == NULL && lhw->ops->bss_info_changed == NULL) return; if (changed == 0) return; LKPI_80211_TRACE_MO("hw %p vif %p conf %p changed %#jx", hw, vif, conf, (uintmax_t)changed); if (lhw->ops->link_info_changed != NULL) lhw->ops->link_info_changed(hw, vif, conf, changed); else lhw->ops->bss_info_changed(hw, vif, conf, changed); } int lkpi_80211_mo_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, uint32_t link_id, uint16_t ac, const struct ieee80211_tx_queue_params *txqp) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->conf_tx == NULL) { error = EOPNOTSUPP; goto out; } LKPI_80211_TRACE_MO("hw %p vif %p link_id %u ac %u txpq %p", hw, vif, link_id, ac, txqp); error = lhw->ops->conf_tx(hw, vif, link_id, ac, txqp); out: return (error); } void lkpi_80211_mo_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, uint32_t nqueues, bool drop) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->flush == NULL) return; LKPI_80211_TRACE_MO("hw %p vif %p nqueues %u drop %d", hw, vif, nqueues, drop); lhw->ops->flush(hw, vif, nqueues, drop); } void lkpi_80211_mo_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *txinfo) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->mgd_prepare_tx == NULL) return; LKPI_80211_TRACE_MO("hw %p vif %p txinfo %p", hw, vif, txinfo); lhw->ops->mgd_prepare_tx(hw, vif, txinfo); } void lkpi_80211_mo_mgd_complete_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *txinfo) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->mgd_complete_tx == NULL) return; LKPI_80211_TRACE_MO("hw %p vif %p txinfo %p", hw, vif, txinfo); lhw->ops->mgd_complete_tx(hw, vif, txinfo); } void lkpi_80211_mo_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *txctrl, struct sk_buff *skb) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->tx == NULL) return; LKPI_80211_TRACE_MO("hw %p txctrl %p skb %p", hw, txctrl, skb); lhw->ops->tx(hw, txctrl, skb); } void lkpi_80211_mo_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->wake_tx_queue == NULL) return; LKPI_80211_TRACE_MO("hw %p txq %p", hw, txq); lhw->ops->wake_tx_queue(hw, txq); } void lkpi_80211_mo_sync_rx_queues(struct ieee80211_hw *hw) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->sync_rx_queues == NULL) return; LKPI_80211_TRACE_MO("hw %p", hw); lhw->ops->sync_rx_queues(hw); } void lkpi_80211_mo_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->sta_pre_rcu_remove == NULL) return; LKPI_80211_TRACE_MO("hw %p vif %p sta %p", hw, vif, sta); lhw->ops->sta_pre_rcu_remove(hw, vif, sta); } int lkpi_80211_mo_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *kc) { struct lkpi_hw *lhw; int 
error; lockdep_assert_wiphy(hw->wiphy); lhw = HW_TO_LHW(hw); if (lhw->ops->set_key == NULL) { error = EOPNOTSUPP; goto out; } LKPI_80211_TRACE_MO("hw %p cmd %d vif %p sta %p kc %p", hw, cmd, vif, sta, kc); error = lhw->ops->set_key(hw, cmd, vif, sta, kc); out: return (error); } int lkpi_80211_mo_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->ampdu_action == NULL) { error = EOPNOTSUPP; goto out; } LKPI_80211_TRACE_MO("hw %p vif %p params %p { %p, %d, %u, %u, %u, %u, %d }", hw, vif, params, params->sta, params->action, params->buf_size, params->timeout, params->ssn, params->tid, params->amsdu); error = lhw->ops->ampdu_action(hw, vif, params); out: return (error); } int lkpi_80211_mo_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct station_info *sinfo) { struct lkpi_hw *lhw; struct lkpi_sta *lsta; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->sta_statistics == NULL) { error = EOPNOTSUPP; goto out; } lsta = STA_TO_LSTA(sta); if (!lsta->added_to_drv) { error = EEXIST; goto out; } lockdep_assert_wiphy(hw->wiphy); LKPI_80211_TRACE_MO("hw %p vif %p sta %p sinfo %p", hw, vif, sta, sinfo); lhw->ops->sta_statistics(hw, vif, sta, sinfo); error = 0; out: return (error); } diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c index e061504868fd..019e08f59d44 100644 --- a/sys/compat/linuxkpi/common/src/linux_compat.c +++ b/sys/compat/linuxkpi/common/src/linux_compat.c @@ -1,3005 +1,3005 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2021 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include "opt_global.h" #include "opt_stack.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #include #endif #include #ifdef XENHVM #undef xen_pv_domain #undef xen_initial_domain /* xen/xen-os.h redefines __must_check */ #undef __must_check #include #endif SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "LinuxKPI parameters"); int linuxkpi_debug; SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); int linuxkpi_rcu_debug; SYSCTL_INT(_compat_linuxkpi, OID_AUTO, rcu_debug, CTLFLAG_RWTUN, &linuxkpi_rcu_debug, 0, "Set to enable RCU warning. Clear to disable."); int linuxkpi_warn_dump_stack = 0; SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN, &linuxkpi_warn_dump_stack, 0, "Set to enable stack traces from WARN_ON(). Clear to disable."); static struct timeval lkpi_net_lastlog; static int lkpi_net_curpps; static int lkpi_net_maxpps = 99; SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN, &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second."); MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat"); #include /* Undo Linux compat changes. 
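 */

/*
 * The collision being undone, in sketch form (hypothetical variables):
 * LinuxKPI's rbtree header follows Linux in using RB_ROOT as an
 * empty-tree initializer, while FreeBSD's tree(3) uses it as a root
 * accessor, the meaning re-established below.
 */
#if 0
	struct rb_root linux_style = RB_ROOT;	/* Linux: initializer. */
	node = RB_ROOT(&freebsd_head);		/* FreeBSD: accessor. */
#endif
/*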
*/ #undef RB_ROOT #undef file #undef cdev #define RB_ROOT(head) (head)->rbh_root static void linux_destroy_dev(struct linux_cdev *); static void linux_cdev_deref(struct linux_cdev *ldev); static struct vm_area_struct *linux_cdev_handle_find(void *handle); cpumask_t cpu_online_mask; static cpumask_t **static_single_cpu_mask; static cpumask_t *static_single_cpu_mask_lcs; struct kobject linux_class_root; struct device linux_root_device; struct class linux_class_misc; struct list_head pci_drivers; struct list_head pci_devices; spinlock_t pci_lock; struct uts_namespace init_uts_ns; unsigned long linux_timer_hz_mask; wait_queue_head_t linux_bit_waitq; wait_queue_head_t linux_var_waitq; int panic_cmp(struct rb_node *one, struct rb_node *two) { panic("no cmp"); } RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); #define START(node) ((node)->start) #define LAST(node) ((node)->last) INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START, LAST,, lkpi_interval_tree) static void linux_device_release(struct device *dev) { pr_debug("linux_device_release: %s\n", dev_name(dev)); kfree(dev); } static ssize_t linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct class, kobj), dattr, buf); return (error); } static ssize_t linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct class, kobj), dattr, buf, count); return (error); } static void linux_class_release(struct kobject *kobj) { struct class *class; class = container_of(kobj, struct class, kobj); if (class->class_release) class->class_release(class); } static const struct sysfs_ops linux_class_sysfs = { .show = linux_class_show, .store = linux_class_store, }; const struct kobj_type linux_class_ktype = { .release = linux_class_release, .sysfs_ops = &linux_class_sysfs }; static void linux_dev_release(struct kobject *kobj) { struct device *dev; dev = container_of(kobj, struct device, kobj); /* This is the precedence defined by linux. */ if (dev->release) dev->release(dev); else if (dev->class && dev->class->dev_release) dev->class->dev_release(dev); } static ssize_t linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct device, kobj), dattr, buf); return (error); } static ssize_t linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct device, kobj), dattr, buf, count); return (error); } static const struct sysfs_ops linux_dev_sysfs = { .show = linux_dev_show, .store = linux_dev_store, }; const struct kobj_type linux_dev_ktype = { .release = linux_dev_release, .sysfs_ops = &linux_dev_sysfs }; struct device * device_create(struct class *class, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...) 
{ struct device *dev; va_list args; dev = kzalloc(sizeof(*dev), M_WAITOK); dev->parent = parent; dev->class = class; dev->devt = devt; dev->driver_data = drvdata; dev->release = linux_device_release; va_start(args, fmt); kobject_set_name_vargs(&dev->kobj, fmt, args); va_end(args); device_register(dev); return (dev); } struct device * device_create_groups_vargs(struct class *class, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, va_list args) { struct device *dev = NULL; int retval = -ENODEV; if (class == NULL || IS_ERR(class)) goto error; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { retval = -ENOMEM; goto error; } dev->devt = devt; dev->class = class; dev->parent = parent; dev->groups = groups; dev->release = device_create_release; /* device_initialize() needs the class and parent to be set */ device_initialize(dev); dev_set_drvdata(dev, drvdata); retval = kobject_set_name_vargs(&dev->kobj, fmt, args); if (retval) goto error; retval = device_add(dev); if (retval) goto error; return dev; error: put_device(dev); return ERR_PTR(retval); } struct class * lkpi_class_create(const char *name) { struct class *class; int error; class = kzalloc(sizeof(*class), M_WAITOK); class->name = name; class->class_release = linux_class_kfree; error = class_register(class); if (error) { kfree(class); return (NULL); } return (class); } static void linux_kq_lock(void *arg) { spinlock_t *s = arg; spin_lock(s); } static void linux_kq_unlock(void *arg) { spinlock_t *s = arg; spin_unlock(s); } static void linux_kq_assert_lock(void *arg, int what) { #ifdef INVARIANTS spinlock_t *s = arg; if (what == LA_LOCKED) mtx_assert(s, MA_OWNED); else mtx_assert(s, MA_NOTOWNED); #endif } static void linux_file_kqfilter_poll(struct linux_file *, int); struct linux_file * linux_file_alloc(void) { struct linux_file *filp; filp = kzalloc(sizeof(*filp), GFP_KERNEL); /* set initial refcount */ filp->f_count = 1; /* setup fields needed by kqueue support */ spin_lock_init(&filp->f_kqlock); knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); return (filp); } void linux_file_free(struct linux_file *filp) { if (filp->_file == NULL) { if (filp->f_op != NULL && filp->f_op->release != NULL) filp->f_op->release(filp->f_vnode, filp); if (filp->f_shmem != NULL) vm_object_deallocate(filp->f_shmem); kfree_rcu(filp, rcu); } else { /* * The close method of the character device or file * will free the linux_file structure: */ _fdrop(filp->_file, curthread); } } struct linux_cdev * cdev_alloc(void) { struct linux_cdev *cdev; cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK); kobject_init(&cdev->kobj, &linux_cdev_ktype); cdev->refs = 1; return (cdev); } static int linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres) { struct vm_area_struct *vmap; vmap = linux_cdev_handle_find(vm_obj->handle); MPASS(vmap != NULL); MPASS(vmap->vm_private_data == vm_obj->handle); if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; vm_page_t page; if (((*mres)->flags & PG_FICTITIOUS) != 0) { /* * If the passed in result page is a fake * page, update it with the new physical * address. */ page = *mres; vm_page_updatefake(page, paddr, vm_obj->memattr); } else { /* * Replace the passed in "mres" page with our * own fake page and free up the all of the * original pages. 
*/ VM_OBJECT_WUNLOCK(vm_obj); page = vm_page_getfake(paddr, vm_obj->memattr); VM_OBJECT_WLOCK(vm_obj); vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); *mres = page; } vm_page_valid(page); return (VM_PAGER_OK); } return (VM_PAGER_FAIL); } static int linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) { struct vm_area_struct *vmap; int err; /* get VM area structure */ vmap = linux_cdev_handle_find(vm_obj->handle); MPASS(vmap != NULL); MPASS(vmap->vm_private_data == vm_obj->handle); VM_OBJECT_WUNLOCK(vm_obj); linux_set_current(curthread); down_write(&vmap->vm_mm->mmap_sem); if (unlikely(vmap->vm_ops == NULL)) { err = VM_FAULT_SIGBUS; } else { struct vm_fault vmf; /* fill out VM fault structure */ vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; vmf.pgoff = 0; vmf.page = NULL; vmf.vma = vmap; vmap->vm_pfn_count = 0; vmap->vm_pfn_pcount = &vmap->vm_pfn_count; vmap->vm_obj = vm_obj; err = vmap->vm_ops->fault(&vmf); while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { kern_yield(PRI_USER); err = vmap->vm_ops->fault(&vmf); } } /* translate return code */ switch (err) { case VM_FAULT_OOM: err = VM_PAGER_AGAIN; break; case VM_FAULT_SIGBUS: err = VM_PAGER_BAD; break; case VM_FAULT_NOPAGE: /* * By contract the fault handler will return having * busied all the pages itself. If pidx is already * found in the object, it will simply xbusy the first * page and return with vm_pfn_count set to 1. */ *first = vmap->vm_pfn_first; *last = *first + vmap->vm_pfn_count - 1; err = VM_PAGER_OK; break; default: err = VM_PAGER_ERROR; break; } up_write(&vmap->vm_mm->mmap_sem); VM_OBJECT_WLOCK(vm_obj); return (err); } static struct rwlock linux_vma_lock; static TAILQ_HEAD(, vm_area_struct) linux_vma_head = TAILQ_HEAD_INITIALIZER(linux_vma_head); static void linux_cdev_handle_free(struct vm_area_struct *vmap) { /* Drop reference on vm_file */ if (vmap->vm_file != NULL) fput(vmap->vm_file); /* Drop reference on mm_struct */ mmput(vmap->vm_mm); kfree(vmap); } static void linux_cdev_handle_remove(struct vm_area_struct *vmap) { rw_wlock(&linux_vma_lock); TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); rw_wunlock(&linux_vma_lock); } static struct vm_area_struct * linux_cdev_handle_find(void *handle) { struct vm_area_struct *vmap; rw_rlock(&linux_vma_lock); TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { if (vmap->vm_private_data == handle) break; } rw_runlock(&linux_vma_lock); return (vmap); } static int linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color) { MPASS(linux_cdev_handle_find(handle) != NULL); *color = 0; return (0); } static void linux_cdev_pager_dtor(void *handle) { const struct vm_operations_struct *vm_ops; struct vm_area_struct *vmap; vmap = linux_cdev_handle_find(handle); MPASS(vmap != NULL); /* * Remove handle before calling close operation to prevent * other threads from reusing the handle pointer. 
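 */

	/*
	 * Post-condition sketch (assuming one vmap per handle): once the
	 * handle is removed below, a concurrent lookup misses.
	 */
#if 0
	/* After linux_cdev_handle_remove(vmap): */
	MPASS(linux_cdev_handle_find(handle) == NULL);
#endif
	/*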
*/ linux_cdev_handle_remove(vmap); down_write(&vmap->vm_mm->mmap_sem); vm_ops = vmap->vm_ops; if (likely(vm_ops != NULL)) vm_ops->close(vmap); up_write(&vmap->vm_mm->mmap_sem); linux_cdev_handle_free(vmap); } static struct cdev_pager_ops linux_cdev_pager_ops[2] = { { /* OBJT_MGTDEVICE */ .cdev_pg_populate = linux_cdev_pager_populate, .cdev_pg_ctor = linux_cdev_pager_ctor, .cdev_pg_dtor = linux_cdev_pager_dtor }, { /* OBJT_DEVICE */ .cdev_pg_fault = linux_cdev_pager_fault, .cdev_pg_ctor = linux_cdev_pager_ctor, .cdev_pg_dtor = linux_cdev_pager_dtor }, }; int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size) { struct pctrie_iter pages; vm_object_t obj; vm_page_t m; obj = vma->vm_obj; if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) return (-ENOTSUP); VM_OBJECT_RLOCK(obj); vm_page_iter_limit_init(&pages, obj, OFF_TO_IDX(address + size)); VM_RADIX_FOREACH_FROM(m, &pages, OFF_TO_IDX(address)) pmap_remove_all(m); VM_OBJECT_RUNLOCK(obj); return (0); } void vma_set_file(struct vm_area_struct *vma, struct linux_file *file) { struct linux_file *tmp; /* Changing an anonymous vma with this is illegal */ get_file(file); tmp = vma->vm_file; vma->vm_file = file; fput(tmp); } static struct file_operations dummy_ldev_ops = { /* XXXKIB */ }; static struct linux_cdev dummy_ldev = { .ops = &dummy_ldev_ops, }; #define LDEV_SI_DTR 0x0001 #define LDEV_SI_REF 0x0002 static void linux_get_fop(struct linux_file *filp, const struct file_operations **fop, struct linux_cdev **dev) { struct linux_cdev *ldev; u_int siref; ldev = filp->f_cdev; *fop = filp->f_op; if (ldev != NULL) { if (ldev->kobj.ktype == &linux_cdev_static_ktype) { refcount_acquire(&ldev->refs); } else { for (siref = ldev->siref;;) { if ((siref & LDEV_SI_DTR) != 0) { ldev = &dummy_ldev; *fop = ldev->ops; siref = ldev->siref; MPASS((ldev->siref & LDEV_SI_DTR) == 0); } else if (atomic_fcmpset_int(&ldev->siref, &siref, siref + LDEV_SI_REF)) { break; } } } } *dev = ldev; } static void linux_drop_fop(struct linux_cdev *ldev) { if (ldev == NULL) return; if (ldev->kobj.ktype == &linux_cdev_static_ktype) { linux_cdev_deref(ldev); } else { MPASS(ldev->kobj.ktype == &linux_cdev_ktype); MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); atomic_subtract_int(&ldev->siref, LDEV_SI_REF); } } #define OPW(fp,td,code) ({ \ struct file *__fpop; \ __typeof(code) __retval; \ \ __fpop = (td)->td_fpop; \ (td)->td_fpop = (fp); \ __retval = (code); \ (td)->td_fpop = __fpop; \ __retval; \ }) static int linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, struct file *file) { struct linux_cdev *ldev; struct linux_file *filp; const struct file_operations *fop; int error; ldev = dev->si_drv1; filp = linux_file_alloc(); filp->f_dentry = &filp->f_dentry_store; filp->f_op = ldev->ops; filp->f_mode = file->f_flag; filp->f_flags = file->f_flag; filp->f_vnode = file->f_vnode; filp->_file = file; refcount_acquire(&ldev->refs); filp->f_cdev = ldev; linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->open != NULL) { error = -fop->open(file->f_vnode, filp); if (error != 0) { linux_drop_fop(ldev); linux_cdev_deref(filp->f_cdev); kfree(filp); return (error); } } /* hold on to the vnode - used for fstat() */ vref(filp->f_vnode); /* release the file from devfs */ finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); linux_drop_fop(ldev); return (ENXIO); } #define LINUX_IOCTL_MIN_PTR 0x10000UL #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) static inline int linux_remap_address(void **uaddr, size_t len) { uintptr_t 
uaddr_val = (uintptr_t)(*uaddr); if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && uaddr_val < LINUX_IOCTL_MAX_PTR)) { struct task_struct *pts = current; if (pts == NULL) { *uaddr = NULL; return (1); } /* compute data offset */ uaddr_val -= LINUX_IOCTL_MIN_PTR; /* check that length is within bounds */ if ((len > IOCPARM_MAX) || (uaddr_val + len) > pts->bsd_ioctl_len) { *uaddr = NULL; return (1); } /* re-add kernel buffer address */ uaddr_val += (uintptr_t)pts->bsd_ioctl_data; /* update address location */ *uaddr = (void *)uaddr_val; return (1); } return (0); } int linux_copyin(const void *uaddr, void *kaddr, size_t len) { if (linux_remap_address(__DECONST(void **, &uaddr), len)) { if (uaddr == NULL) return (-EFAULT); memcpy(kaddr, uaddr, len); return (0); } return (-copyin(uaddr, kaddr, len)); } int linux_copyout(const void *kaddr, void *uaddr, size_t len) { if (linux_remap_address(&uaddr, len)) { if (uaddr == NULL) return (-EFAULT); memcpy(uaddr, kaddr, len); return (0); } return (-copyout(kaddr, uaddr, len)); } size_t linux_clear_user(void *_uaddr, size_t _len) { uint8_t *uaddr = _uaddr; size_t len = _len; /* make sure uaddr is aligned before going into the fast loop */ while (((uintptr_t)uaddr & 7) != 0 && len > 7) { if (subyte(uaddr, 0)) return (_len); uaddr++; len--; } /* zero 8 bytes at a time */ while (len > 7) { #ifdef __LP64__ if (suword64(uaddr, 0)) return (_len); #else if (suword32(uaddr, 0)) return (_len); if (suword32(uaddr + 4, 0)) return (_len); #endif uaddr += 8; len -= 8; } /* zero fill end, if any */ while (len > 0) { if (subyte(uaddr, 0)) return (_len); uaddr++; len--; } return (0); } int linux_access_ok(const void *uaddr, size_t len) { uintptr_t saddr; uintptr_t eaddr; /* get start and end address */ saddr = (uintptr_t)uaddr; eaddr = (uintptr_t)uaddr + len; /* verify addresses are valid for userspace */ return ((saddr == eaddr) || (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); } /* * This function should return either EINTR or ERESTART depending on * the signal type sent to this thread: */ static int linux_get_error(struct task_struct *task, int error) { /* check for signal type interrupt code */ if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { error = -linux_schedule_get_interrupt_value(task); if (error == 0) error = EINTR; } return (error); } static int linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, const struct file_operations *fop, u_long cmd, caddr_t data, struct thread *td) { struct task_struct *task = current; unsigned size; int error; size = IOCPARM_LEN(cmd); /* refer to logic in sys_ioctl() */ if (size > 0) { /* * Setup hint for linux_copyin() and linux_copyout(). * * Background: Linux code expects a user-space address * while FreeBSD supplies a kernel-space address. 
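 */

		/*
		 * Driver-side view (sketch, hypothetical handler and args):
		 * the Linux ioctl handler treats the LINUX_IOCTL_MIN_PTR-based
		 * cookie as its user pointer and copies through it as usual;
		 * linux_copyin() above remaps the cookie back onto
		 * bsd_ioctl_data.
		 */
#if 0
		if (copy_from_user(&req, (void *)arg, sizeof(req)) != 0)
			return (-EFAULT);
#endif
		/*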
*/ task->bsd_ioctl_data = data; task->bsd_ioctl_len = size; data = (void *)LINUX_IOCTL_MIN_PTR; } else { /* fetch user-space pointer */ data = *(void **)data; } #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { /* try the compat IOCTL handler first */ if (fop->compat_ioctl != NULL) { error = -OPW(fp, td, fop->compat_ioctl(filp, cmd, (u_long)data)); } else { error = ENOTTY; } /* fallback to the regular IOCTL handler, if any */ if (error == ENOTTY && fop->unlocked_ioctl != NULL) { error = -OPW(fp, td, fop->unlocked_ioctl(filp, cmd, (u_long)data)); } } else #endif { if (fop->unlocked_ioctl != NULL) { error = -OPW(fp, td, fop->unlocked_ioctl(filp, cmd, (u_long)data)); } else { error = ENOTTY; } } if (size > 0) { task->bsd_ioctl_data = NULL; task->bsd_ioctl_len = 0; } if (error == EWOULDBLOCK) { /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); } else { error = linux_get_error(task, error); } return (error); } #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) /* * This function atomically updates the poll wakeup state and returns * the previous state at the time of update. */ static uint8_t linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) { int c, old; c = v->counter; while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) c = old; return (c); } static int linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ }; struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_QUEUED: linux_poll_wakeup(filp); return (1); default: return (0); } } void linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, }; /* check if we are called inside the select system call */ if (p == LINUX_POLL_TABLE_NORMAL) selrecord(curthread, &filp->f_selinfo); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_INIT: /* NOTE: file handles can only belong to one wait-queue */ filp->f_wait_queue.wqh = wqh; filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; add_wait_queue(wqh, &filp->f_wait_queue.wq); atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); break; default: break; } } static void linux_poll_wait_dequeue(struct linux_file *filp) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, }; seldrain(&filp->f_selinfo); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_NOT_READY: case LINUX_FWQ_STATE_QUEUED: case LINUX_FWQ_STATE_READY: remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); break; default: break; } } void linux_poll_wakeup(struct linux_file *filp) { /* this function should be 
NULL-safe */ if (filp == NULL) return; selwakeup(&filp->f_selinfo); spin_lock(&filp->f_kqlock); filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | LINUX_KQ_FLAG_NEED_WRITE; /* make sure the "knote" gets woken up */ KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); spin_unlock(&filp->f_kqlock); } static struct linux_file * __get_file_rcu(struct linux_file **f) { struct linux_file *file1, *file2; file1 = READ_ONCE(*f); if (file1 == NULL) return (NULL); if (!refcount_acquire_if_not_zero( file1->_file == NULL ? &file1->f_count : &file1->_file->f_count)) return (ERR_PTR(-EAGAIN)); file2 = READ_ONCE(*f); if (file2 == file1) return (file2); fput(file1); return (ERR_PTR(-EAGAIN)); } struct linux_file * linux_get_file_rcu(struct linux_file **f) { struct linux_file *file1; for (;;) { file1 = __get_file_rcu(f); if (file1 == NULL) return (NULL); if (IS_ERR(file1)) continue; return (file1); } } struct linux_file * get_file_active(struct linux_file **f) { struct linux_file *file1; rcu_read_lock(); file1 = __get_file_rcu(f); rcu_read_unlock(); if (IS_ERR(file1)) file1 = NULL; return (file1); } static void linux_file_kqfilter_detach(struct knote *kn) { struct linux_file *filp = kn->kn_hook; spin_lock(&filp->f_kqlock); knlist_remove(&filp->f_selinfo.si_note, kn, 1); spin_unlock(&filp->f_kqlock); } static int linux_file_kqfilter_read_event(struct knote *kn, long hint) { struct linux_file *filp = kn->kn_hook; mtx_assert(&filp->f_kqlock, MA_OWNED); return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); } static int linux_file_kqfilter_write_event(struct knote *kn, long hint) { struct linux_file *filp = kn->kn_hook; mtx_assert(&filp->f_kqlock, MA_OWNED); return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); } static const struct filterops linux_dev_kqfiltops_read = { .f_isfd = 1, .f_detach = linux_file_kqfilter_detach, .f_event = linux_file_kqfilter_read_event, }; static const struct filterops linux_dev_kqfiltops_write = { .f_isfd = 1, .f_detach = linux_file_kqfilter_detach, .f_event = linux_file_kqfilter_write_event, }; static void linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) { struct thread *td; const struct file_operations *fop; struct linux_cdev *ldev; int temp; if ((filp->f_kqflags & kqflags) == 0) return; td = curthread; linux_get_fop(filp, &fop, &ldev); /* get the latest polling state */ temp = OPW(filp->_file, td, fop->poll(filp, NULL)); linux_drop_fop(ldev); spin_lock(&filp->f_kqlock); /* clear kqflags */ filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | LINUX_KQ_FLAG_NEED_WRITE); /* update kqflags */ if ((temp & (POLLIN | POLLOUT)) != 0) { if ((temp & POLLIN) != 0) filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; if ((temp & POLLOUT) != 0) filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; /* make sure the "knote" gets woken up */ KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); } spin_unlock(&filp->f_kqlock); } static int linux_file_kqfilter(struct file *file, struct knote *kn) { struct linux_file *filp; struct thread *td; int error; td = curthread; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; if (filp->f_op->poll == NULL) return (EINVAL); spin_lock(&filp->f_kqlock); switch (kn->kn_filter) { case EVFILT_READ: filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; kn->kn_fop = &linux_dev_kqfiltops_read; kn->kn_hook = filp; knlist_add(&filp->f_selinfo.si_note, kn, 1); error = 0; break; case EVFILT_WRITE: filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; kn->kn_fop = &linux_dev_kqfiltops_write; kn->kn_hook = filp; knlist_add(&filp->f_selinfo.si_note, kn, 1); error = 0; break; default: error = 
EINVAL; break; } spin_unlock(&filp->f_kqlock); if (error == 0) { linux_set_current(td); /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); } return (error); } static int linux_file_mmap_single(struct file *fp, const struct file_operations *fop, vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot, bool is_shared, struct thread *td) { struct task_struct *task; struct vm_area_struct *vmap; struct mm_struct *mm; struct linux_file *filp; vm_memattr_t attr; int error; filp = (struct linux_file *)fp->f_data; filp->f_flags = fp->f_flag; if (fop->mmap == NULL) return (EOPNOTSUPP); linux_set_current(td); /* * The same VM object might be shared by multiple processes * and the mm_struct is usually freed when a process exits. * * The atomic reference below makes sure the mm_struct is * available as long as the vmap is in the linux_vma_head. */ task = current; mm = task->mm; if (atomic_inc_not_zero(&mm->mm_users) == 0) return (EINVAL); vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); vmap->vm_start = 0; vmap->vm_end = size; vmap->vm_pgoff = *offset / PAGE_SIZE; vmap->vm_pfn = 0; vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); if (is_shared) vmap->vm_flags |= VM_SHARED; vmap->vm_ops = NULL; vmap->vm_file = get_file(filp); vmap->vm_mm = mm; if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { error = linux_get_error(task, EINTR); } else { error = -OPW(fp, td, fop->mmap(filp, vmap)); error = linux_get_error(task, error); up_write(&vmap->vm_mm->mmap_sem); } if (error != 0) { linux_cdev_handle_free(vmap); return (error); } attr = pgprot2cachemode(vmap->vm_page_prot); if (vmap->vm_ops != NULL) { struct vm_area_struct *ptr; void *vm_private_data; bool vm_no_fault; if (vmap->vm_ops->open == NULL || vmap->vm_ops->close == NULL || vmap->vm_private_data == NULL) { /* free allocated VM area struct */ linux_cdev_handle_free(vmap); return (EINVAL); } vm_private_data = vmap->vm_private_data; rw_wlock(&linux_vma_lock); TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { if (ptr->vm_private_data == vm_private_data) break; } /* check if there is an existing VM area struct */ if (ptr != NULL) { /* check if the VM area structure is invalid */ if (ptr->vm_ops == NULL || ptr->vm_ops->open == NULL || ptr->vm_ops->close == NULL) { error = ESTALE; vm_no_fault = 1; } else { error = EEXIST; vm_no_fault = (ptr->vm_ops->fault == NULL); } } else { /* insert VM area structure into list */ TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); error = 0; vm_no_fault = (vmap->vm_ops->fault == NULL); } rw_wunlock(&linux_vma_lock); if (error != 0) { /* free allocated VM area struct */ linux_cdev_handle_free(vmap); /* check for stale VM area struct */ if (error != EEXIST) return (error); } /* check if there is no fault handler */ if (vm_no_fault) { *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, &linux_cdev_pager_ops[1], size, nprot, *offset, td->td_ucred); } else { *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, &linux_cdev_pager_ops[0], size, nprot, *offset, td->td_ucred); } /* check if allocating the VM object failed */ if (*object == NULL) { if (error == 0) { /* remove VM area struct from list */ linux_cdev_handle_remove(vmap); /* free allocated VM area struct */ linux_cdev_handle_free(vmap); } return (EINVAL); } } else { struct sglist *sg; sg = sglist_alloc(1, M_WAITOK); sglist_append_phys(sg, (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, nprot, 
0, td->td_ucred); linux_cdev_handle_free(vmap); if (*object == NULL) { sglist_free(sg); return (EINVAL); } } if (attr != VM_MEMATTR_DEFAULT) { VM_OBJECT_WLOCK(*object); vm_object_set_memattr(*object, attr); VM_OBJECT_WUNLOCK(*object); } *offset = 0; return (0); } struct cdevsw linuxcdevsw = { .d_version = D_VERSION, .d_fdopen = linux_dev_fdopen, .d_name = "lkpidev", }; static int linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; ssize_t bytes; int error; error = 0; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; /* XXX no support for I/O vectors currently */ if (uio->uio_iovcnt != 1) return (EOPNOTSUPP); if (uio->uio_resid > DEVFS_IOSIZE_MAX) return (EINVAL); linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->read != NULL) { bytes = OPW(file, td, fop->read(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset)); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; } else { error = linux_get_error(current, -bytes); } } else error = ENXIO; /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); linux_drop_fop(ldev); return (error); } static int linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; ssize_t bytes; int error; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; /* XXX no support for I/O vectors currently */ if (uio->uio_iovcnt != 1) return (EOPNOTSUPP); if (uio->uio_resid > DEVFS_IOSIZE_MAX) return (EINVAL); linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->write != NULL) { bytes = OPW(file, td, fop->write(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset)); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; error = 0; } else { error = linux_get_error(current, -bytes); } } else error = ENXIO; /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); linux_drop_fop(ldev); return (error); } static int linux_file_poll(struct file *file, int events, struct ucred *active_cred, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; int revents; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->poll != NULL) { revents = OPW(file, td, fop->poll(filp, LINUX_POLL_TABLE_NORMAL)) & events; } else { revents = 0; } linux_drop_fop(ldev); return (revents); } static int linux_file_close(struct file *file, struct thread *td) { struct linux_file *filp; int (*release)(struct inode *, struct linux_file *); const struct file_operations *fop; struct linux_cdev *ldev; int error; filp = (struct linux_file *)file->f_data; KASSERT(file_count(filp) == 0, ("File refcount(%d) is not zero", file_count(filp))); if (td == NULL) td = curthread; error = 0; filp->f_flags = file->f_flag; linux_set_current(td); linux_poll_wait_dequeue(filp); linux_get_fop(filp, &fop, &ldev); /* * Always use the real release function, if any, to avoid * leaking device resources: */ release = filp->f_op->release; if (release != NULL) error = -OPW(file, 
td, release(filp->f_vnode, filp)); funsetown(&filp->f_sigio); if (filp->f_vnode != NULL) vrele(filp->f_vnode); linux_drop_fop(ldev); ldev = filp->f_cdev; if (ldev != NULL) linux_cdev_deref(ldev); linux_synchronize_rcu(RCU_TYPE_REGULAR); kfree(filp); return (error); } static int linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; struct fiodgname_arg *fgn; const char *p; int error, i; error = 0; filp = (struct linux_file *)fp->f_data; filp->f_flags = fp->f_flag; linux_get_fop(filp, &fop, &ldev); linux_set_current(td); switch (cmd) { case FIONBIO: break; case FIOASYNC: if (fop->fasync == NULL) break; error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); break; case FIOSETOWN: error = fsetown(*(int *)data, &filp->f_sigio); if (error == 0) { if (fop->fasync == NULL) break; error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); } break; case FIOGETOWN: *(int *)data = fgetown(&filp->f_sigio); break; case FIODGNAME: #ifdef COMPAT_FREEBSD32 case FIODGNAME_32: #endif if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { error = ENXIO; break; } fgn = data; p = devtoname(filp->f_cdev->cdev); i = strlen(p) + 1; if (i > fgn->len) { error = EINVAL; break; } error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); break; default: error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); break; } linux_drop_fop(ldev); return (error); } static int linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, vm_prot_t maxprot, int flags, struct file *fp, vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) { /* * Character devices do not provide private mappings * of any kind: */ if ((maxprot & VM_PROT_WRITE) == 0 && (prot & VM_PROT_WRITE) != 0) return (EACCES); if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) return (EINVAL); return (linux_file_mmap_single(fp, fop, foff, objsize, objp, (int)prot, (flags & MAP_SHARED) ? true : false, td)); } static int linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; struct mount *mp; struct vnode *vp; vm_object_t object; vm_prot_t maxprot; int error; filp = (struct linux_file *)fp->f_data; vp = filp->f_vnode; if (vp == NULL) return (EOPNOTSUPP); /* * Ensure that file and memory protections are * compatible. */ mp = vp->v_mount; if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { maxprot = VM_PROT_NONE; if ((prot & VM_PROT_EXECUTE) != 0) return (EACCES); } else maxprot = VM_PROT_EXECUTE; if ((fp->f_flag & FREAD) != 0) maxprot |= VM_PROT_READ; else if ((prot & VM_PROT_READ) != 0) return (EACCES); /* * If we are sharing potential changes via MAP_SHARED and we * are trying to get write permission although we opened it * without asking for it, bail out. * * Note that most character devices always share mappings. * * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE * requests rather than doing it here. 
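 *
 * As a hedged illustration of the rules in this function (simplified;
 * the FREAD/FWRITE and VM_PROT_* checks around this comment are the
 * authoritative version): maxprot starts from the exec policy, gains
 * VM_PROT_READ only if the file was opened for reading, and gains
 * VM_PROT_WRITE only for MAP_SHARED mappings of files opened for
 * writing; requesting more than that fails with EACCES. For example:
 *
 *	open(..., O_RDONLY) + mmap(..., PROT_READ, MAP_SHARED)  -> OK
 *	open(..., O_RDONLY) + mmap(..., PROT_WRITE, MAP_SHARED) -> EACCES
 *	open(..., O_RDWR)   + mmap(..., PROT_WRITE, MAP_SHARED) -> OK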
*/ if ((flags & MAP_SHARED) != 0) { if ((fp->f_flag & FWRITE) != 0) maxprot |= VM_PROT_WRITE; else if ((prot & VM_PROT_WRITE) != 0) return (EACCES); } maxprot &= cap_maxprot; linux_get_fop(filp, &fop, &ldev); error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp, &foff, fop, &object); if (error != 0) goto out; error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, foff, FALSE, td); if (error != 0) vm_object_deallocate(object); out: linux_drop_fop(ldev); return (error); } static int linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred) { struct linux_file *filp; struct vnode *vp; int error; filp = (struct linux_file *)fp->f_data; if (filp->f_vnode == NULL) return (EOPNOTSUPP); vp = filp->f_vnode; vn_lock(vp, LK_SHARED | LK_RETRY); error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED); VOP_UNLOCK(vp); return (error); } static int linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { struct linux_file *filp; struct vnode *vp; int error; filp = fp->f_data; vp = filp->f_vnode; if (vp == NULL) { error = 0; kif->kf_type = KF_TYPE_DEV; } else { vref(vp); FILEDESC_SUNLOCK(fdp); error = vn_fill_kinfo_vnode(vp, kif); vrele(vp); kif->kf_type = KF_TYPE_VNODE; FILEDESC_SLOCK(fdp); } return (error); } unsigned int linux_iminor(struct inode *inode) { struct linux_cdev *ldev; if (inode == NULL || inode->v_rdev == NULL || inode->v_rdev->si_devsw != &linuxcdevsw) return (-1U); ldev = inode->v_rdev->si_drv1; if (ldev == NULL) return (-1U); return (minor(ldev->dev)); } static int linux_file_kcmp(struct file *fp1, struct file *fp2, struct thread *td) { struct linux_file *filp1, *filp2; if (fp2->f_type != DTYPE_DEV) return (3); filp1 = fp1->f_data; filp2 = fp2->f_data; return (kcmp_cmp((uintptr_t)filp1->f_cdev, (uintptr_t)filp2->f_cdev)); } const struct fileops linuxfileops = { .fo_read = linux_file_read, .fo_write = linux_file_write, .fo_truncate = invfo_truncate, .fo_kqfilter = linux_file_kqfilter, .fo_stat = linux_file_stat, .fo_fill_kinfo = linux_file_fill_kinfo, .fo_poll = linux_file_poll, .fo_close = linux_file_close, .fo_ioctl = linux_file_ioctl, .fo_mmap = linux_file_mmap, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, .fo_cmp = linux_file_kcmp, .fo_flags = DFLAG_PASSABLE, }; /* * Hash of vmmap addresses. This is infrequently accessed and does not * need to be particularly large. This is done because we must store the * caller's idea of the map size to properly unmap. 
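 *
 * The hash exists because the Linux side of this API is asymmetric:
 * iounmap() and vunmap() receive only the address, while the FreeBSD
 * primitives underneath (pmap_unmapdev(), pmap_qremove(), kva_free())
 * also need the size. A hedged driver-style usage sketch (the physical
 * address and size are illustrative only); the structures and functions
 * follow below:
 *
 *	void *regs;
 *
 *	regs = ioremap(0xfed00000UL, 0x1000);	// size recorded by address
 *	if (regs != NULL) {
 *		// ... access device registers via regs ...
 *		iounmap(regs);		// size looked up, mapping destroyed
 *	}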
*/ struct vmmap { LIST_ENTRY(vmmap) vm_next; void *vm_addr; unsigned long vm_size; }; struct vmmaphd { struct vmmap *lh_first; }; #define VMMAP_HASH_SIZE 64 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; static struct mtx vmmaplock; static void vmmap_add(void *addr, unsigned long size) { struct vmmap *vmmap; vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); mtx_lock(&vmmaplock); vmmap->vm_size = size; vmmap->vm_addr = addr; LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); mtx_unlock(&vmmaplock); } static struct vmmap * vmmap_remove(void *addr) { struct vmmap *vmmap; mtx_lock(&vmmaplock); LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) if (vmmap->vm_addr == addr) break; if (vmmap) LIST_REMOVE(vmmap, vm_next); mtx_unlock(&vmmaplock); return (vmmap); } #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) void * _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) { void *addr; addr = pmap_mapdev_attr(phys_addr, size, attr); if (addr == NULL) return (NULL); vmmap_add(addr, size); return (addr); } #endif void iounmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) pmap_unmapdev(addr, vmmap->vm_size); #endif kfree(vmmap); } void * vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) { vm_offset_t off; size_t size; size = count * PAGE_SIZE; off = kva_alloc(size); if (off == 0) return (NULL); vmmap_add((void *)off, size); pmap_qenter(off, pages, count); return ((void *)off); } void vunmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); kva_free((vm_offset_t)addr, vmmap->vm_size); kfree(vmmap); } static char * devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) { unsigned int len; char *p; va_list aq; va_copy(aq, ap); len = vsnprintf(NULL, 0, fmt, aq); va_end(aq); if (dev != NULL) p = devm_kmalloc(dev, len + 1, gfp); else p = kmalloc(len + 1, gfp); if (p != NULL) vsnprintf(p, len + 1, fmt, ap); return (p); } char * kvasprintf(gfp_t gfp, const char *fmt, va_list ap) { return (devm_kvasprintf(NULL, gfp, fmt, ap)); } char * lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) { va_list ap; char *p; va_start(ap, fmt); p = devm_kvasprintf(dev, gfp, fmt, ap); va_end(ap); return (p); } char * kasprintf(gfp_t gfp, const char *fmt, ...) { va_list ap; char *p; va_start(ap, fmt); p = kvasprintf(gfp, fmt, ap); va_end(ap); return (p); } int __lkpi_hexdump_printf(void *arg1 __unused, const char *fmt, ...) { va_list ap; int result; va_start(ap, fmt); result = vprintf(fmt, ap); va_end(ap); return (result); } int __lkpi_hexdump_sbuf_printf(void *arg1, const char *fmt, ...) 
{ va_list ap; int result; va_start(ap, fmt); result = sbuf_vprintf(arg1, fmt, ap); va_end(ap); return (result); } void lkpi_hex_dump(int(*_fpf)(void *, const char *, ...), void *arg1, const char *level, const char *prefix_str, const int prefix_type, const int rowsize, const int groupsize, const void *buf, size_t len, const bool ascii) { typedef const struct { long long value; } __packed *print_64p_t; typedef const struct { uint32_t value; } __packed *print_32p_t; typedef const struct { uint16_t value; } __packed *print_16p_t; const void *buf_old = buf; int row; while (len > 0) { if (level != NULL) _fpf(arg1, "%s", level); if (prefix_str != NULL) _fpf(arg1, "%s ", prefix_str); switch (prefix_type) { case DUMP_PREFIX_ADDRESS: _fpf(arg1, "[%p] ", buf); break; case DUMP_PREFIX_OFFSET: _fpf(arg1, "[%#tx] ", ((const char *)buf - (const char *)buf_old)); break; default: break; } for (row = 0; row != rowsize; row++) { if (groupsize == 8 && len > 7) { _fpf(arg1, "%016llx ", ((print_64p_t)buf)->value); buf = (const uint8_t *)buf + 8; len -= 8; } else if (groupsize == 4 && len > 3) { _fpf(arg1, "%08x ", ((print_32p_t)buf)->value); buf = (const uint8_t *)buf + 4; len -= 4; } else if (groupsize == 2 && len > 1) { _fpf(arg1, "%04x ", ((print_16p_t)buf)->value); buf = (const uint8_t *)buf + 2; len -= 2; } else if (len > 0) { _fpf(arg1, "%02x ", *(const uint8_t *)buf); buf = (const uint8_t *)buf + 1; len--; } else { break; } } _fpf(arg1, "\n"); } } static void linux_timer_callback_wrapper(void *context) { struct timer_list *timer; timer = context; /* the timer is about to be shutdown permanently */ if (timer->function == NULL) return; if (linux_set_current_flags(curthread, M_NOWAIT)) { /* try again later */ callout_reset(&timer->callout, 1, &linux_timer_callback_wrapper, timer); return; } timer->function(timer->data); } int -mod_timer(struct timer_list *timer, int expires) +mod_timer(struct timer_list *timer, unsigned long expires) { int ret; timer->expires = expires; ret = callout_reset(&timer->callout, linux_timer_jiffies_until(expires), &linux_timer_callback_wrapper, timer); MPASS(ret == 0 || ret == 1); return (ret == 1); } void add_timer(struct timer_list *timer) { callout_reset(&timer->callout, linux_timer_jiffies_until(timer->expires), &linux_timer_callback_wrapper, timer); } void add_timer_on(struct timer_list *timer, int cpu) { callout_reset_on(&timer->callout, linux_timer_jiffies_until(timer->expires), &linux_timer_callback_wrapper, timer, cpu); } int del_timer(struct timer_list *timer) { if (callout_stop(&(timer)->callout) == -1) return (0); return (1); } int del_timer_sync(struct timer_list *timer) { if (callout_drain(&(timer)->callout) == -1) return (0); return (1); } int timer_delete_sync(struct timer_list *timer) { return (del_timer_sync(timer)); } int timer_shutdown_sync(struct timer_list *timer) { timer->function = NULL; return (del_timer_sync(timer)); } /* greatest common divisor, Euclid equation */ static uint64_t lkpi_gcd_64(uint64_t a, uint64_t b) { uint64_t an; uint64_t bn; while (b != 0) { an = b; bn = a % b; a = an; b = bn; } return (a); } uint64_t lkpi_nsec2hz_rem; uint64_t lkpi_nsec2hz_div = 1000000000ULL; uint64_t lkpi_nsec2hz_max; uint64_t lkpi_usec2hz_rem; uint64_t lkpi_usec2hz_div = 1000000ULL; uint64_t lkpi_usec2hz_max; uint64_t lkpi_msec2hz_rem; uint64_t lkpi_msec2hz_div = 1000ULL; uint64_t lkpi_msec2hz_max; static void linux_timer_init(void *arg) { uint64_t gcd; /* * Compute an internal HZ value which can divide 2**32 to * avoid timer rounding problems when the tick value 
wraps * around 2**32: */ linux_timer_hz_mask = 1; while (linux_timer_hz_mask < (unsigned long)hz) linux_timer_hz_mask *= 2; linux_timer_hz_mask--; /* compute some internal constants */ lkpi_nsec2hz_rem = hz; lkpi_usec2hz_rem = hz; lkpi_msec2hz_rem = hz; gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); lkpi_nsec2hz_rem /= gcd; lkpi_nsec2hz_div /= gcd; lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); lkpi_usec2hz_rem /= gcd; lkpi_usec2hz_div /= gcd; lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); lkpi_msec2hz_rem /= gcd; lkpi_msec2hz_div /= gcd; lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; } SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); void linux_complete_common(struct completion *c, int all) { sleepq_lock(c); if (all) { c->done = UINT_MAX; sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); } else { if (c->done != UINT_MAX) c->done++; sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); } sleepq_release(c); } /* * Indefinite wait for done != 0 with or without signals. */ int linux_wait_for_common(struct completion *c, int flags) { struct task_struct *task; int error; if (SCHEDULER_STOPPED()) return (0); task = current; if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; error = 0; for (;;) { sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); if (flags & SLEEPQ_INTERRUPTIBLE) { DROP_GIANT(); error = -sleepq_wait_sig(c, 0); PICKUP_GIANT(); if (error != 0) { linux_schedule_save_interrupt_value(task, error); error = -ERESTARTSYS; goto intr; } } else { DROP_GIANT(); sleepq_wait(c, 0); PICKUP_GIANT(); } } if (c->done != UINT_MAX) c->done--; sleepq_release(c); intr: return (error); } /* * Time limited wait for done != 0 with or without signals. 
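 *
 * Caller-side, the convention implemented below (and the reason this
 * diff widens the timeout and return types from int to unsigned long)
 * is that the timed wait returns the remaining jiffies on success and 0
 * on timeout. A hedged sketch against the macros from completion.h:
 *
 *	struct completion c;
 *	unsigned long rem;
 *
 *	init_completion(&c);
 *	// ... start work that eventually calls complete(&c) ...
 *	rem = wait_for_completion_timeout(&c, msecs_to_jiffies(5000));
 *	if (rem == 0)
 *		printf("timed out\n");	// the full timeout elapsed
 *	else
 *		printf("done, %lu jiffies to spare\n", rem);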
*/ -int -linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) +unsigned long +linux_wait_for_timeout_common(struct completion *c, unsigned long timeout, + int flags) { struct task_struct *task; - int end = jiffies + timeout; - int error; + unsigned long end = jiffies + timeout, error; if (SCHEDULER_STOPPED()) return (0); task = current; if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; for (;;) { sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); sleepq_set_timeout(c, linux_timer_jiffies_until(end)); DROP_GIANT(); if (flags & SLEEPQ_INTERRUPTIBLE) error = -sleepq_timedwait_sig(c, 0); else error = -sleepq_timedwait(c, 0); PICKUP_GIANT(); if (error != 0) { /* check for timeout */ if (error == -EWOULDBLOCK) { error = 0; /* timeout */ } else { /* signal happened */ linux_schedule_save_interrupt_value(task, error); error = -ERESTARTSYS; } goto done; } } if (c->done != UINT_MAX) c->done--; sleepq_release(c); /* return how many jiffies are left */ error = linux_timer_jiffies_until(end); done: return (error); } int linux_try_wait_for_completion(struct completion *c) { int isdone; sleepq_lock(c); isdone = (c->done != 0); if (c->done != 0 && c->done != UINT_MAX) c->done--; sleepq_release(c); return (isdone); } int linux_completion_done(struct completion *c) { int isdone; sleepq_lock(c); isdone = (c->done != 0); sleepq_release(c); return (isdone); } static void linux_cdev_deref(struct linux_cdev *ldev) { if (refcount_release(&ldev->refs) && ldev->kobj.ktype == &linux_cdev_ktype) kfree(ldev); } static void linux_cdev_release(struct kobject *kobj) { struct linux_cdev *cdev; struct kobject *parent; cdev = container_of(kobj, struct linux_cdev, kobj); parent = kobj->parent; linux_destroy_dev(cdev); linux_cdev_deref(cdev); kobject_put(parent); } static void linux_cdev_static_release(struct kobject *kobj) { struct cdev *cdev; struct linux_cdev *ldev; ldev = container_of(kobj, struct linux_cdev, kobj); cdev = ldev->cdev; if (cdev != NULL) { destroy_dev(cdev); ldev->cdev = NULL; } kobject_put(kobj->parent); } int linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev) { int ret; if (dev->devt != 0) { /* Set parent kernel object. 
*/ ldev->kobj.parent = &dev->kobj; /* * Unlike Linux we require the kobject of the * character device structure to have a valid name * before calling this function: */ if (ldev->kobj.name == NULL) return (-EINVAL); ret = cdev_add(ldev, dev->devt, 1); if (ret) return (ret); } ret = device_add(dev); if (ret != 0 && dev->devt != 0) cdev_del(ldev); return (ret); } void linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev) { device_del(dev); if (dev->devt != 0) cdev_del(ldev); } static void linux_destroy_dev(struct linux_cdev *ldev) { if (ldev->cdev == NULL) return; MPASS((ldev->siref & LDEV_SI_DTR) == 0); MPASS(ldev->kobj.ktype == &linux_cdev_ktype); atomic_set_int(&ldev->siref, LDEV_SI_DTR); while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) pause("ldevdtr", hz / 4); destroy_dev(ldev->cdev); ldev->cdev = NULL; } const struct kobj_type linux_cdev_ktype = { .release = linux_cdev_release, }; const struct kobj_type linux_cdev_static_ktype = { .release = linux_cdev_static_release, }; static void linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; if (linkstate == LINK_STATE_UP) nb->notifier_call(nb, NETDEV_UP, &ni); else nb->notifier_call(nb, NETDEV_DOWN, &ni); } static void linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; nb->notifier_call(nb, NETDEV_REGISTER, &ni); } static void linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; nb->notifier_call(nb, NETDEV_UNREGISTER, &ni); } static void linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni); } static void linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni); } int register_netdevice_notifier(struct notifier_block *nb) { nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( iflladdr_event, linux_handle_iflladdr_event, nb, 0); return (0); } int register_inetaddr_notifier(struct notifier_block *nb) { nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( ifaddr_event, linux_handle_ifaddr_event, nb, 0); return (0); } int unregister_netdevice_notifier(struct notifier_block *nb) { EVENTHANDLER_DEREGISTER(ifnet_link_event, nb->tags[NETDEV_UP]); EVENTHANDLER_DEREGISTER(ifnet_arrival_event, nb->tags[NETDEV_REGISTER]); EVENTHANDLER_DEREGISTER(ifnet_departure_event, nb->tags[NETDEV_UNREGISTER]); EVENTHANDLER_DEREGISTER(iflladdr_event, nb->tags[NETDEV_CHANGEADDR]); return (0); } int unregister_inetaddr_notifier(struct notifier_block *nb) { EVENTHANDLER_DEREGISTER(ifaddr_event, nb->tags[NETDEV_CHANGEIFADDR]); return (0); } struct list_sort_thunk { int (*cmp)(void *, 
struct list_head *, struct list_head *); void *priv; }; static inline int linux_le_cmp(const void *d1, const void *d2, void *priv) { struct list_head *le1, *le2; struct list_sort_thunk *thunk; thunk = priv; le1 = *(__DECONST(struct list_head **, d1)); le2 = *(__DECONST(struct list_head **, d2)); return ((thunk->cmp)(thunk->priv, le1, le2)); } void list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, struct list_head *a, struct list_head *b)) { struct list_sort_thunk thunk; struct list_head **ar, *le; size_t count, i; count = 0; list_for_each(le, head) count++; ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); i = 0; list_for_each(le, head) ar[i++] = le; thunk.cmp = cmp; thunk.priv = priv; qsort_r(ar, count, sizeof(struct list_head *), linux_le_cmp, &thunk); INIT_LIST_HEAD(head); for (i = 0; i < count; i++) list_add_tail(ar[i], head); free(ar, M_KMALLOC); } #if defined(__i386__) || defined(__amd64__) int linux_wbinvd_on_all_cpus(void) { pmap_invalidate_cache(); return (0); } #endif int linux_on_each_cpu(void callback(void *), void *data) { smp_rendezvous(smp_no_rendezvous_barrier, callback, smp_no_rendezvous_barrier, data); return (0); } int linux_in_atomic(void) { return ((curthread->td_pflags & TDP_NOFAULTING) != 0); } struct linux_cdev * linux_find_cdev(const char *name, unsigned major, unsigned minor) { dev_t dev = MKDEV(major, minor); struct cdev *cdev; dev_lock(); LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { struct linux_cdev *ldev = cdev->si_drv1; if (ldev->dev == dev && strcmp(kobject_name(&ldev->kobj), name) == 0) { break; } } dev_unlock(); return (cdev != NULL ? cdev->si_drv1 : NULL); } int __register_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops) { struct linux_cdev *cdev; int ret = 0; int i; for (i = baseminor; i < baseminor + count; i++) { cdev = cdev_alloc(); cdev->ops = fops; kobject_set_name(&cdev->kobj, name); ret = cdev_add(cdev, makedev(major, i), 1); if (ret != 0) break; } return (ret); } int __register_chrdev_p(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops, uid_t uid, gid_t gid, int mode) { struct linux_cdev *cdev; int ret = 0; int i; for (i = baseminor; i < baseminor + count; i++) { cdev = cdev_alloc(); cdev->ops = fops; kobject_set_name(&cdev->kobj, name); ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); if (ret != 0) break; } return (ret); } void __unregister_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name) { struct linux_cdev *cdevp; int i; for (i = baseminor; i < baseminor + count; i++) { cdevp = linux_find_cdev(name, major, i); if (cdevp != NULL) cdev_del(cdevp); } } void linux_dump_stack(void) { #ifdef STACK struct stack st; stack_save(&st); stack_print(&st); #endif } int linuxkpi_net_ratelimit(void) { return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps, lkpi_net_maxpps)); } struct io_mapping * io_mapping_create_wc(resource_size_t base, unsigned long size) { struct io_mapping *mapping; mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); if (mapping == NULL) return (NULL); return (io_mapping_init_wc(mapping, base, size)); } /* We likely want a linuxkpi_device.c at some point. */ bool device_can_wakeup(struct device *dev) { if (dev == NULL) return (false); /* * XXX-BZ iwlwifi queries it as part of enabling WoWLAN. * Normally this would be based on a bool in dev->power.XXX. * Check such as PCI PCIM_PCAP_*PME. 
We have no way to enable this yet. * We may get away by directly calling into bsddev for as long as * we can assume PCI only avoiding changing struct device breaking KBI. */ pr_debug("%s:%d: not enabled; see comment.\n", __func__, __LINE__); return (false); } static void devm_device_group_remove(struct device *dev, void *p) { const struct attribute_group **dr = p; const struct attribute_group *group = *dr; sysfs_remove_group(&dev->kobj, group); } int lkpi_devm_device_add_group(struct device *dev, const struct attribute_group *group) { const struct attribute_group **dr; int ret; dr = devres_alloc(devm_device_group_remove, sizeof(*dr), GFP_KERNEL); if (dr == NULL) return (-ENOMEM); ret = sysfs_create_group(&dev->kobj, group); if (ret == 0) { *dr = group; devres_add(dev, dr); } else devres_free(dr); return (ret); } #if defined(__i386__) || defined(__amd64__) bool linux_cpu_has_clflush; struct cpuinfo_x86 boot_cpu_data; struct cpuinfo_x86 *__cpu_data; #endif cpumask_t * lkpi_get_static_single_cpu_mask(int cpuid) { KASSERT((cpuid >= 0 && cpuid <= mp_maxid), ("%s: invalid cpuid %d\n", __func__, cpuid)); KASSERT(!CPU_ABSENT(cpuid), ("%s: cpu with cpuid %d is absent\n", __func__, cpuid)); return (static_single_cpu_mask[cpuid]); } bool lkpi_xen_initial_domain(void) { #ifdef XENHVM return (xen_initial_domain()); #else return (false); #endif } bool lkpi_xen_pv_domain(void) { #ifdef XENHVM return (xen_pv_domain()); #else return (false); #endif } static void linux_compat_init(void *arg) { struct sysctl_oid *rootoid; int i; #if defined(__i386__) || defined(__amd64__) static const uint32_t x86_vendors[X86_VENDOR_NUM] = { [X86_VENDOR_INTEL] = CPU_VENDOR_INTEL, [X86_VENDOR_CYRIX] = CPU_VENDOR_CYRIX, [X86_VENDOR_AMD] = CPU_VENDOR_AMD, [X86_VENDOR_UMC] = CPU_VENDOR_UMC, [X86_VENDOR_CENTAUR] = CPU_VENDOR_CENTAUR, [X86_VENDOR_TRANSMETA] = CPU_VENDOR_TRANSMETA, [X86_VENDOR_NSC] = CPU_VENDOR_NSC, [X86_VENDOR_HYGON] = CPU_VENDOR_HYGON, }; uint8_t x86_vendor = X86_VENDOR_UNKNOWN; for (i = 0; i < X86_VENDOR_NUM; i++) { if (cpu_vendor_id != 0 && cpu_vendor_id == x86_vendors[i]) { x86_vendor = i; break; } } linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); boot_cpu_data.x86_clflush_size = cpu_clflush_line_size; boot_cpu_data.x86_max_cores = mp_ncpus; boot_cpu_data.x86 = CPUID_TO_FAMILY(cpu_id); boot_cpu_data.x86_model = CPUID_TO_MODEL(cpu_id); boot_cpu_data.x86_vendor = x86_vendor; __cpu_data = kmalloc_array(mp_maxid + 1, sizeof(*__cpu_data), M_WAITOK | M_ZERO); CPU_FOREACH(i) { __cpu_data[i].x86_clflush_size = cpu_clflush_line_size; __cpu_data[i].x86_max_cores = mp_ncpus; __cpu_data[i].x86 = CPUID_TO_FAMILY(cpu_id); __cpu_data[i].x86_model = CPUID_TO_MODEL(cpu_id); __cpu_data[i].x86_vendor = x86_vendor; } #endif rw_init(&linux_vma_lock, "lkpi-vma-lock"); rootoid = SYSCTL_ADD_ROOT_NODE(NULL, OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); kobject_init(&linux_class_root, &linux_class_ktype); kobject_set_name(&linux_class_root, "class"); linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); kobject_init(&linux_root_device.kobj, &linux_dev_ktype); kobject_set_name(&linux_root_device.kobj, "device"); linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device"); linux_root_device.bsddev = root_bus; linux_class_misc.name = "misc"; class_register(&linux_class_misc); INIT_LIST_HEAD(&pci_drivers); INIT_LIST_HEAD(&pci_devices); spin_lock_init(&pci_lock); 
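/*
 * Aside: lkpi_devm_device_add_group() above is one instance of the
 * generic devres pattern shared by the LinuxKPI devm_*() helpers. A
 * hedged sketch of the shape (my_release and my_setup are hypothetical
 * names, not LinuxKPI API):
 *
 *	static void
 *	my_release(struct device *dev, void *p)
 *	{
 *		// undo the paired setup, using the record at p
 *	}
 *
 *	static int
 *	my_setup(struct device *dev)
 *	{
 *		void **dr;
 *
 *		dr = devres_alloc(my_release, sizeof(*dr), GFP_KERNEL);
 *		if (dr == NULL)
 *			return (-ENOMEM);
 *		// acquire the resource and record it in *dr
 *		devres_add(dev, dr);	// auto-released on device detach
 *		return (0);
 *	}
 */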
mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); for (i = 0; i < VMMAP_HASH_SIZE; i++) LIST_INIT(&vmmaphead[i]); init_waitqueue_head(&linux_bit_waitq); init_waitqueue_head(&linux_var_waitq); CPU_COPY(&all_cpus, &cpu_online_mask); /* * Generate a single-CPU cpumask_t for each CPU (possibly) in the system. * CPUs are indexed from 0..(mp_maxid). The entry for cpuid 0 will only * have itself in the cpumask, cpuid 1 only itself on entry 1, and so on. * This is used by cpumask_of() (and possibly others in the future) for, * e.g., drivers to pass hints to irq_set_affinity_hint(). */ static_single_cpu_mask = kmalloc_array(mp_maxid + 1, sizeof(static_single_cpu_mask), M_WAITOK | M_ZERO); /* * When the number of CPUs reaches a threshold, we start to save memory * given the sets are static by overlapping those having their single * bit set at the same position in a bitset word. Asymptotically, this * regular scheme is in O(n²) whereas the overlapping one is in O(n) * only with n being the maximum number of CPUs, so the gain will become * huge quite quickly. The threshold for 64-bit architectures is 128 * CPUs. */ if (mp_ncpus < (2 * _BITSET_BITS)) { cpumask_t *sscm_ptr; /* * This represents 'mp_ncpus * __bitset_words(CPU_SETSIZE) * * (_BITSET_BITS / 8)' bytes (for comparison with the * overlapping scheme). */ static_single_cpu_mask_lcs = kmalloc_array(mp_ncpus, sizeof(*static_single_cpu_mask_lcs), M_WAITOK | M_ZERO); sscm_ptr = static_single_cpu_mask_lcs; CPU_FOREACH(i) { static_single_cpu_mask[i] = sscm_ptr++; CPU_SET(i, static_single_cpu_mask[i]); } } else { /* Pointer to a bitset word. */ __typeof(((cpuset_t *)NULL)->__bits[0]) *bwp; /* * Allocate memory for (static) spans of 'cpumask_t' ('cpuset_t' * really) with a single bit set that can be reused for all * single CPU masks by making them start at different offsets. * We need '__bitset_words(CPU_SETSIZE) - 1' bitset words before * the word having its single bit set, and the same amount * after. */ static_single_cpu_mask_lcs = mallocarray(_BITSET_BITS, (2 * __bitset_words(CPU_SETSIZE) - 1) * (_BITSET_BITS / 8), M_KMALLOC, M_WAITOK | M_ZERO); /* * We rely below on cpuset_t and the bitset generic * implementation assigning words in the '__bits' array in the * same order of bits (i.e., little-endian ordering, not to be * confused with machine endianness, which concerns bits in * words and other integers). This is an imperfect test, but it * will detect a change to big-endian ordering. */ _Static_assert( __bitset_word(_BITSET_BITS + 1, _BITSET_BITS) == 1, "Assumes a bitset implementation that is little-endian " "on its words"); /* Initialize the single bit of each static span. */ bwp = (__typeof(bwp))static_single_cpu_mask_lcs + (__bitset_words(CPU_SETSIZE) - 1); for (i = 0; i < _BITSET_BITS; i++) { CPU_SET(i, (cpuset_t *)bwp); bwp += (2 * __bitset_words(CPU_SETSIZE) - 1); } /* * Finally set all CPU masks to the proper word in their * relevant span. */ CPU_FOREACH(i) { bwp = (__typeof(bwp))static_single_cpu_mask_lcs; /* Find the non-zero word of the relevant span. */ bwp += (2 * __bitset_words(CPU_SETSIZE) - 1) * (i % _BITSET_BITS) + __bitset_words(CPU_SETSIZE) - 1; /* Shift to find the CPU mask start.
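 *
 * (A hedged worked example of this indexing, assuming CPU_SETSIZE = 256
 * and 64-bit bitset words, so __bitset_words(CPU_SETSIZE) = 4 and each
 * span is 2 * 4 - 1 = 7 words: for CPU 70, i % 64 = 6 selects span 6,
 * whose single-bit word sits at word offset 7 * 6 + 3 = 45 with bit 6
 * set; shifting the start back by i / 64 = 1 word places the view at
 * offset 44, so word 1, bit 6 of the resulting cpuset -- exactly CPU
 * 70 -- reads as set, and every other word of the view lies in zeroed
 * span memory.)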
*/ bwp -= (i / _BITSET_BITS); static_single_cpu_mask[i] = (cpuset_t *)bwp; } } strlcpy(init_uts_ns.name.release, osrelease, sizeof(init_uts_ns.name.release)); } SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); static void linux_compat_uninit(void *arg) { linux_kobject_kfree_name(&linux_class_root); linux_kobject_kfree_name(&linux_root_device.kobj); linux_kobject_kfree_name(&linux_class_misc.kobj); free(static_single_cpu_mask_lcs, M_KMALLOC); free(static_single_cpu_mask, M_KMALLOC); #if defined(__i386__) || defined(__amd64__) free(__cpu_data, M_KMALLOC); #endif mtx_destroy(&vmmaplock); spin_lock_destroy(&pci_lock); rw_destroy(&linux_vma_lock); } SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); /* * NOTE: Linux frequently uses "unsigned long" for pointer to integer * conversion and vice versa, where in FreeBSD "uintptr_t" would be * used. Assert these types have the same size, else some parts of the * LinuxKPI may not work like expected: */ CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); diff --git a/sys/compat/linuxkpi/common/src/linux_netdev.c b/sys/compat/linuxkpi/common/src/linux_netdev.c index c36684f9fd97..ce9153614104 100644 --- a/sys/compat/linuxkpi/common/src/linux_netdev.c +++ b/sys/compat/linuxkpi/common/src/linux_netdev.c @@ -1,436 +1,436 @@ /*- * Copyright (c) 2021 The FreeBSD Foundation * Copyright (c) 2022 Bjoern A. Zeeb * * This software was developed by Björn Zeeb under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include MALLOC_DEFINE(M_NETDEV, "lkpindev", "Linux KPI netdevice compat"); #define NAPI_LOCK_INIT(_ndev) \ mtx_init(&(_ndev)->napi_mtx, "napi_mtx", NULL, MTX_DEF) #define NAPI_LOCK_DESTROY(_ndev) mtx_destroy(&(_ndev)->napi_mtx) #define NAPI_LOCK_ASSERT(_ndev) mtx_assert(&(_ndev)->napi_mtx, MA_OWNED) #define NAPI_LOCK(_ndev) mtx_lock(&(_ndev)->napi_mtx) #define NAPI_UNLOCK(_ndev) mtx_unlock(&(_ndev)->napi_mtx) /* -------------------------------------------------------------------------- */ #define LKPI_NAPI_FLAGS \ "\20\1DISABLE_PENDING\2IS_SCHEDULED\3LOST_RACE_TRY_AGAIN" /* #define NAPI_DEBUG */ #ifdef NAPI_DEBUG static int debug_napi; SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug_napi, CTLFLAG_RWTUN, &debug_napi, 0, "NAPI debug level"); #define DNAPI_TODO 0x01 #define DNAPI_IMPROVE 0x02 #define DNAPI_TRACE 0x10 #define DNAPI_TRACE_TASK 0x20 #define DNAPI_DIRECT_DISPATCH 0x1000 #define NAPI_TRACE(_n) if (debug_napi & DNAPI_TRACE) \ - printf("NAPI_TRACE %s:%d %u %p (%#jx %b)\n", __func__, __LINE__, \ - (unsigned int)ticks, _n, (uintmax_t)(_n)->state, \ + printf("NAPI_TRACE %s:%d %lu %p (%#jx %b)\n", __func__, __LINE__, \ + jiffies, _n, (uintmax_t)(_n)->state, \ (int)(_n)->state, LKPI_NAPI_FLAGS) #define NAPI_TRACE2D(_n, _d) if (debug_napi & DNAPI_TRACE) \ - printf("NAPI_TRACE %s:%d %u %p (%#jx %b) %d\n", __func__, __LINE__, \ - (unsigned int)ticks, _n, (uintmax_t)(_n)->state, \ + printf("NAPI_TRACE %s:%d %lu %p (%#jx %b) %d\n", __func__, __LINE__, \ + jiffies, _n, (uintmax_t)(_n)->state, \ (int)(_n)->state, LKPI_NAPI_FLAGS, _d) #define NAPI_TRACE_TASK(_n, _p, _c) if (debug_napi & DNAPI_TRACE_TASK) \ - printf("NAPI_TRACE %s:%d %u %p (%#jx %b) pending %d count %d " \ + printf("NAPI_TRACE %s:%d %lu %p (%#jx %b) pending %d count %d " \ "rx_count %d\n", __func__, __LINE__, \ - (unsigned int)ticks, _n, (uintmax_t)(_n)->state, \ + jiffies, _n, (uintmax_t)(_n)->state, \ (int)(_n)->state, LKPI_NAPI_FLAGS, _p, _c, (_n)->rx_count) #define NAPI_TODO() if (debug_napi & DNAPI_TODO) \ - printf("NAPI_TODO %s:%d %d\n", __func__, __LINE__, ticks) + printf("NAPI_TODO %s:%d %lu\n", __func__, __LINE__, jiffies) #define NAPI_IMPROVE() if (debug_napi & DNAPI_IMPROVE) \ - printf("NAPI_IMPROVE %s:%d %d\n", __func__, __LINE__, ticks) + printf("NAPI_IMPROVE %s:%d %lu\n", __func__, __LINE__, jiffies) #define NAPI_DIRECT_DISPATCH() ((debug_napi & DNAPI_DIRECT_DISPATCH) != 0) #else #define NAPI_TRACE(_n) do { } while(0) #define NAPI_TRACE2D(_n, _d) do { } while(0) #define NAPI_TRACE_TASK(_n, _p, _c) do { } while(0) #define NAPI_TODO() do { } while(0) #define NAPI_IMPROVE() do { } while(0) #define NAPI_DIRECT_DISPATCH() (0) #endif /* -------------------------------------------------------------------------- */ /* * Check if a poll is running or can run, and if the latter, * mark us as running. That way we ensure that only one poll * can ever run at the same time. Returns true if no poll * was scheduled yet. */ bool linuxkpi_napi_schedule_prep(struct napi_struct *napi) { unsigned long old, new; NAPI_TRACE(napi); /* We can only update/return if all flags agree. */ do { old = READ_ONCE(napi->state); /* If we are stopping, cannot run again. */ if ((old & BIT(LKPI_NAPI_FLAG_DISABLE_PENDING)) != 0) { NAPI_TRACE(napi); return (false); } new = old; /* We were already scheduled. Need to try again?
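 *
 * (The do/while loop around this check is the standard lock-free
 * read-modify-write idiom: snapshot the state, compute the successor
 * state, and retry whenever another CPU raced in between. A hedged,
 * generic sketch of the same pattern outside NAPI:
 *
 *	static void
 *	set_flag_atomically(volatile u_long *state, u_long flag)
 *	{
 *		u_long old, new;
 *
 *		do {
 *			old = *state;		// snapshot
 *			new = old | flag;	// desired successor
 *		} while (atomic_cmpset_acq_long(state, old, new) == 0);
 *	}
 *
 * atomic_cmpset_acq_long() returns non-zero only when *state still held
 * 'old' and was swapped to 'new'.)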
*/ if ((old & BIT(LKPI_NAPI_FLAG_IS_SCHEDULED)) != 0) new |= BIT(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN); new |= BIT(LKPI_NAPI_FLAG_IS_SCHEDULED); } while (atomic_cmpset_acq_long(&napi->state, old, new) == 0); NAPI_TRACE(napi); return ((old & BIT(LKPI_NAPI_FLAG_IS_SCHEDULED)) == 0); } static void lkpi___napi_schedule_dd(struct napi_struct *napi) { unsigned long old, new; int rc; rc = 0; again: NAPI_TRACE2D(napi, rc); if (napi->poll != NULL) rc = napi->poll(napi, napi->budget); napi->rx_count += rc; /* Check if interrupts are still disabled, more work to do. */ /* Bandaid for now. */ if (rc >= napi->budget) goto again; /* Bandaid for now. */ if (test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->state)) goto again; do { new = old = READ_ONCE(napi->state); clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &new); clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &new); } while (atomic_cmpset_acq_long(&napi->state, old, new) == 0); NAPI_TRACE2D(napi, rc); } void linuxkpi___napi_schedule(struct napi_struct *napi) { int rc; NAPI_TRACE(napi); if (test_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state)) { clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &napi->state); clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state); NAPI_TRACE(napi); return; } if (NAPI_DIRECT_DISPATCH()) { lkpi___napi_schedule_dd(napi); } else { rc = taskqueue_enqueue(napi->dev->napi_tq, &napi->napi_task); NAPI_TRACE2D(napi, rc); if (rc != 0) { /* Should we assert EPIPE? */ return; } } } bool linuxkpi_napi_schedule(struct napi_struct *napi) { NAPI_TRACE(napi); /* * iwlwifi calls this sequence instead of napi_schedule() * to be able to test the prep result. */ if (napi_schedule_prep(napi)) { __napi_schedule(napi); return (true); } return (false); } void linuxkpi_napi_reschedule(struct napi_struct *napi) { NAPI_TRACE(napi); /* Not sure what is different to napi_schedule yet. */ if (napi_schedule_prep(napi)) __napi_schedule(napi); } bool linuxkpi_napi_complete_done(struct napi_struct *napi, int ret) { unsigned long old, new; NAPI_TRACE(napi); if (NAPI_DIRECT_DISPATCH()) return (true); do { new = old = READ_ONCE(napi->state); /* * If we lost a race before, we need to re-schedule. * Leave IS_SCHEDULED set essentially doing "_prep". */ if (!test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &old)) clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &new); clear_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &new); } while (atomic_cmpset_acq_long(&napi->state, old, new) == 0); NAPI_TRACE(napi); /* Someone tried to schedule while poll was running. Re-sched. */ if (test_bit(LKPI_NAPI_FLAG_LOST_RACE_TRY_AGAIN, &old)) { __napi_schedule(napi); return (false); } return (true); } bool linuxkpi_napi_complete(struct napi_struct *napi) { NAPI_TRACE(napi); return (napi_complete_done(napi, 0)); } void linuxkpi_napi_disable(struct napi_struct *napi) { NAPI_TRACE(napi); set_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->state); while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state)) pause_sbt("napidslp", SBT_1MS, 0, C_HARDCLOCK); clear_bit(LKPI_NAPI_FLAG_DISABLE_PENDING, &napi->state); } void linuxkpi_napi_enable(struct napi_struct *napi) { NAPI_TRACE(napi); KASSERT(!test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state), ("%s: enabling napi %p already scheduled\n", __func__, napi)); mb(); /* Let us be scheduled. */ clear_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state); } void linuxkpi_napi_synchronize(struct napi_struct *napi) { NAPI_TRACE(napi); #if defined(SMP) /* Check & sleep while a napi is scheduled. 
*/ while (test_bit(LKPI_NAPI_FLAG_IS_SCHEDULED, &napi->state)) pause_sbt("napisslp", SBT_1MS, 0, C_HARDCLOCK); #else mb(); #endif } /* -------------------------------------------------------------------------- */ static void lkpi_napi_task(void *ctx, int pending) { struct napi_struct *napi; int count; KASSERT(ctx != NULL, ("%s: napi %p, pending %d\n", __func__, ctx, pending)); napi = ctx; KASSERT(napi->poll != NULL, ("%s: napi %p poll is NULL\n", __func__, napi)); NAPI_TRACE_TASK(napi, pending, napi->budget); count = napi->poll(napi, napi->budget); napi->rx_count += count; NAPI_TRACE_TASK(napi, pending, count); /* * We must not check against count < pending here. There are situations * when a driver may "poll" and we may not have any work to do and that * would make us re-schedule ourselves forever. */ if (count >= napi->budget) { /* * Have to re-schedule ourselves. napi_complete() was not run * in this case which means we are still SCHEDULED. * In order to queue another task we have to directly call * __napi_schedule() without _prep() in the way. */ __napi_schedule(napi); } } /* -------------------------------------------------------------------------- */ void linuxkpi_netif_napi_add(struct net_device *ndev, struct napi_struct *napi, int(*napi_poll)(struct napi_struct *, int)) { napi->dev = ndev; napi->poll = napi_poll; napi->budget = NAPI_POLL_WEIGHT; INIT_LIST_HEAD(&napi->rx_list); napi->rx_count = 0; TASK_INIT(&napi->napi_task, 0, lkpi_napi_task, napi); NAPI_LOCK(ndev); TAILQ_INSERT_TAIL(&ndev->napi_head, napi, entry); NAPI_UNLOCK(ndev); /* Anything else to do on the ndev? */ clear_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state); } static void lkpi_netif_napi_del_locked(struct napi_struct *napi) { struct net_device *ndev; ndev = napi->dev; NAPI_LOCK_ASSERT(ndev); set_bit(LKPI_NAPI_FLAG_SHUTDOWN, &napi->state); TAILQ_REMOVE(&ndev->napi_head, napi, entry); while (taskqueue_cancel(ndev->napi_tq, &napi->napi_task, NULL) != 0) taskqueue_drain(ndev->napi_tq, &napi->napi_task); } void linuxkpi_netif_napi_del(struct napi_struct *napi) { struct net_device *ndev; ndev = napi->dev; NAPI_LOCK(ndev); lkpi_netif_napi_del_locked(napi); NAPI_UNLOCK(ndev); } /* -------------------------------------------------------------------------- */ void linuxkpi_init_dummy_netdev(struct net_device *ndev) { memset(ndev, 0, sizeof(*ndev)); ndev->reg_state = NETREG_DUMMY; NAPI_LOCK_INIT(ndev); TAILQ_INIT(&ndev->napi_head); /* Anything else? */ ndev->napi_tq = taskqueue_create("tq_ndev_napi", M_WAITOK, taskqueue_thread_enqueue, &ndev->napi_tq); /* One thread for now. */ (void) taskqueue_start_threads(&ndev->napi_tq, 1, PWAIT, "ndev napi taskq"); } struct net_device * linuxkpi_alloc_netdev(size_t len, const char *name, uint32_t flags, void(*setup_func)(struct net_device *)) { struct net_device *ndev; ndev = malloc(sizeof(*ndev) + len, M_NETDEV, M_NOWAIT); if (ndev == NULL) return (ndev); /* Always first as it zeros! */ linuxkpi_init_dummy_netdev(ndev); strlcpy(ndev->name, name, sizeof(ndev->name)); /* This needs extending as we support more. */ if (setup_func != NULL) setup_func(ndev); return (ndev); } void linuxkpi_free_netdev(struct net_device *ndev) { struct napi_struct *napi, *temp; NAPI_LOCK(ndev); TAILQ_FOREACH_SAFE(napi, &ndev->napi_head, entry, temp) { lkpi_netif_napi_del_locked(napi); } NAPI_UNLOCK(ndev); taskqueue_free(ndev->napi_tq); ndev->napi_tq = NULL; NAPI_LOCK_DESTROY(ndev); /* This needs extending as we support more.
*/ free(ndev, M_NETDEV); } diff --git a/sys/compat/linuxkpi/common/src/linux_schedule.c b/sys/compat/linuxkpi/common/src/linux_schedule.c index fa20a11f5ec7..507d6fc417d0 100644 --- a/sys/compat/linuxkpi/common/src/linux_schedule.c +++ b/sys/compat/linuxkpi/common/src/linux_schedule.c @@ -1,417 +1,417 @@ /*- * Copyright (c) 2017 Mark Johnston * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include static int linux_add_to_sleepqueue(void *wchan, struct task_struct *task, - const char *wmesg, int timeout, int state) + const char *wmesg, long timeout, int state) { int flags, ret; MPASS((state & ~(TASK_PARKED | TASK_NORMAL)) == 0); flags = SLEEPQ_SLEEP | ((state & TASK_INTERRUPTIBLE) != 0 ?
SLEEPQ_INTERRUPTIBLE : 0); sleepq_add(wchan, NULL, wmesg, flags, 0); if (timeout != 0) sleepq_set_timeout(wchan, timeout); DROP_GIANT(); if ((state & TASK_INTERRUPTIBLE) != 0) { if (timeout == 0) ret = -sleepq_wait_sig(wchan, 0); else ret = -sleepq_timedwait_sig(wchan, 0); } else { if (timeout == 0) { sleepq_wait(wchan, 0); ret = 0; } else ret = -sleepq_timedwait(wchan, 0); } PICKUP_GIANT(); /* filter return value */ if (ret != 0 && ret != -EWOULDBLOCK) { linux_schedule_save_interrupt_value(task, ret); ret = -ERESTARTSYS; } return (ret); } unsigned int linux_msleep_interruptible(unsigned int ms) { int ret; /* guard against invalid values */ if (ms == 0) ms = 1; ret = -pause_sbt("lnxsleep", mstosbt(ms), 0, C_HARDCLOCK | C_CATCH); switch (ret) { case -EWOULDBLOCK: return (0); default: linux_schedule_save_interrupt_value(current, ret); return (ms); } } static int wake_up_task(struct task_struct *task, unsigned int state) { int ret; ret = 0; sleepq_lock(task); if ((atomic_read(&task->state) & state) != 0) { set_task_state(task, TASK_WAKING); sleepq_signal(task, SLEEPQ_SLEEP, 0, 0); ret = 1; } sleepq_release(task); return (ret); } bool linux_signal_pending(struct task_struct *task) { struct thread *td; sigset_t pending; td = task->task_thread; PROC_LOCK(td->td_proc); pending = td->td_siglist; SIGSETOR(pending, td->td_proc->p_siglist); SIGSETNAND(pending, td->td_sigmask); PROC_UNLOCK(td->td_proc); return (!SIGISEMPTY(pending)); } bool linux_fatal_signal_pending(struct task_struct *task) { struct thread *td; bool ret; td = task->task_thread; PROC_LOCK(td->td_proc); ret = SIGISMEMBER(td->td_siglist, SIGKILL) || SIGISMEMBER(td->td_proc->p_siglist, SIGKILL); PROC_UNLOCK(td->td_proc); return (ret); } bool linux_signal_pending_state(long state, struct task_struct *task) { MPASS((state & ~TASK_NORMAL) == 0); if ((state & TASK_INTERRUPTIBLE) == 0) return (false); return (linux_signal_pending(task)); } void linux_send_sig(int signo, struct task_struct *task) { struct thread *td; td = task->task_thread; PROC_LOCK(td->td_proc); tdsignal(td, signo); PROC_UNLOCK(td->td_proc); } int autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags, void *key __unused) { struct task_struct *task; int ret; task = wq->private; if ((ret = wake_up_task(task, state)) != 0) list_del_init(&wq->task_list); return (ret); } int default_wake_function(wait_queue_t *wq, unsigned int state, int flags, void *key __unused) { return (wake_up_task(wq->private, state)); } void linux_init_wait_entry(wait_queue_t *wq, int flags) { memset(wq, 0, sizeof(*wq)); wq->flags = flags; wq->private = current; wq->func = autoremove_wake_function; INIT_LIST_HEAD(&wq->task_list); } void linux_wake_up(wait_queue_head_t *wqh, unsigned int state, int nr, bool locked) { wait_queue_t *pos, *next; if (!locked) spin_lock(&wqh->lock); list_for_each_entry_safe(pos, next, &wqh->task_list, task_list) { if (pos->func == NULL) { if (wake_up_task(pos->private, state) != 0 && --nr == 0) break; } else { if (pos->func(pos, state, 0, NULL) != 0 && --nr == 0) break; } } if (!locked) spin_unlock(&wqh->lock); } void linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state) { spin_lock(&wqh->lock); if (list_empty(&wq->task_list)) __add_wait_queue(wqh, wq); set_task_state(current, state); spin_unlock(&wqh->lock); } void linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq) { spin_lock(&wqh->lock); set_task_state(current, TASK_RUNNING); if (!list_empty(&wq->task_list)) { __remove_wait_queue(wqh, wq); INIT_LIST_HEAD(&wq->task_list); } 
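/*
 * Hedged usage sketch of the prepare/finish pair implemented here --
 * the classic Linux wait loop ('my_wqh' and 'cond' are hypothetical):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wqh, &wait, TASK_UNINTERRUPTIBLE);
 *		if (cond)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wqh, &wait);
 *
 * prepare_to_wait() enqueues and marks the task sleeping before the
 * condition is re-checked, which closes the lost-wakeup window.
 */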
spin_unlock(&wqh->lock); } bool linux_waitqueue_active(wait_queue_head_t *wqh) { bool ret; spin_lock(&wqh->lock); ret = !list_empty(&wqh->task_list); spin_unlock(&wqh->lock); return (ret); } int -linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, int timeout, +linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, long timeout, unsigned int state, spinlock_t *lock) { struct task_struct *task; int ret; if (lock != NULL) spin_unlock_irq(lock); /* range check timeout */ if (timeout < 1) timeout = 1; else if (timeout == MAX_SCHEDULE_TIMEOUT) timeout = 0; task = current; sleepq_lock(task); if (atomic_read(&task->state) != TASK_WAKING) { ret = linux_add_to_sleepqueue(task, task, "wevent", timeout, state); } else { sleepq_release(task); ret = 0; } if (lock != NULL) spin_lock_irq(lock); return (ret); } -int -linux_schedule_timeout(int timeout) +long +linux_schedule_timeout(long timeout) { struct task_struct *task; + long remainder; int ret; int state; - int remainder; task = current; /* range check timeout */ if (timeout < 1) timeout = 1; else if (timeout == MAX_SCHEDULE_TIMEOUT) timeout = 0; - remainder = ticks + timeout; + remainder = jiffies + timeout; sleepq_lock(task); state = atomic_read(&task->state); if (state != TASK_WAKING) { ret = linux_add_to_sleepqueue(task, task, "sched", timeout, state); } else { sleepq_release(task); ret = 0; } set_task_state(task, TASK_RUNNING); if (timeout == 0) return (MAX_SCHEDULE_TIMEOUT); /* range check return value */ - remainder -= ticks; + remainder -= jiffies; /* range check return value */ if (ret == -ERESTARTSYS && remainder < 1) remainder = 1; else if (remainder < 0) remainder = 0; else if (remainder > timeout) remainder = timeout; return (remainder); } static void wake_up_sleepers(void *wchan) { sleepq_lock(wchan); sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0); sleepq_release(wchan); } #define bit_to_wchan(word, bit) ((void *)(((uintptr_t)(word) << 6) | (bit))) void linux_wake_up_bit(void *word, int bit) { wake_up_sleepers(bit_to_wchan(word, bit)); } int linux_wait_on_bit_timeout(unsigned long *word, int bit, unsigned int state, - int timeout) + long timeout) { struct task_struct *task; void *wchan; int ret; /* range check timeout */ if (timeout < 1) timeout = 1; else if (timeout == MAX_SCHEDULE_TIMEOUT) timeout = 0; task = current; wchan = bit_to_wchan(word, bit); for (;;) { sleepq_lock(wchan); if ((*word & (1 << bit)) == 0) { sleepq_release(wchan); ret = 0; break; } set_task_state(task, state); ret = linux_add_to_sleepqueue(wchan, task, "wbit", timeout, state); if (ret != 0) break; } set_task_state(task, TASK_RUNNING); return (ret); } void linux_wake_up_atomic_t(atomic_t *a) { wake_up_sleepers(a); } int linux_wait_on_atomic_t(atomic_t *a, unsigned int state) { struct task_struct *task; void *wchan; int ret; task = current; wchan = a; for (;;) { sleepq_lock(wchan); if (atomic_read(a) == 0) { sleepq_release(wchan); ret = 0; break; } set_task_state(task, state); ret = linux_add_to_sleepqueue(wchan, task, "watomic", 0, state); if (ret != 0) break; } set_task_state(task, TASK_RUNNING); return (ret); } bool linux_wake_up_state(struct task_struct *task, unsigned int state) { return (wake_up_task(task, state) != 0); } diff --git a/sys/compat/linuxkpi/common/src/linux_work.c b/sys/compat/linuxkpi/common/src/linux_work.c index 939bdbbc1434..cf15d1a9c41b 100644 --- a/sys/compat/linuxkpi/common/src/linux_work.c +++ b/sys/compat/linuxkpi/common/src/linux_work.c @@ -1,782 +1,782 @@ /*- * Copyright (c) 2017-2019 Hans Petter Selasky * All 
rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include /* * Define all work struct states */ enum { WORK_ST_IDLE, /* idle - not started */ WORK_ST_TIMER, /* timer is being started */ WORK_ST_TASK, /* taskqueue is being queued */ WORK_ST_EXEC, /* callback is being called */ WORK_ST_CANCEL, /* cancel is being requested */ WORK_ST_MAX, }; /* * Define global workqueues */ static struct workqueue_struct *linux_system_short_wq; static struct workqueue_struct *linux_system_long_wq; struct workqueue_struct *system_wq; struct workqueue_struct *system_long_wq; struct workqueue_struct *system_unbound_wq; struct workqueue_struct *system_highpri_wq; struct workqueue_struct *system_power_efficient_wq; struct taskqueue *linux_irq_work_tq; static int linux_default_wq_cpus = 4; static void linux_delayed_work_timer_fn(void *); /* * This function atomically updates the work state and returns the * previous state at the time of update. */ static uint8_t linux_update_state(atomic_t *v, const uint8_t *pstate) { int c, old; c = v->counter; while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) c = old; return (c); } /* * A LinuxKPI task is allowed to free itself inside the callback function * and cannot safely be referred to after the callback function has * completed. This function gives the linux_work_fn() function a hint * that the task is not going away and can have its state checked * again. Without this extra hint LinuxKPI tasks cannot be serialized * across multiple worker threads. */ static bool linux_work_exec_unblock(struct work_struct *work) { struct workqueue_struct *wq; struct work_exec *exec; bool retval = false; wq = work->work_queue; if (unlikely(wq == NULL)) goto done; WQ_EXEC_LOCK(wq); TAILQ_FOREACH(exec, &wq->exec_head, entry) { if (exec->target == work) { exec->target = NULL; retval = true; break; } } WQ_EXEC_UNLOCK(wq); done: return (retval); } static void linux_delayed_work_enqueue(struct delayed_work *dwork) { struct taskqueue *tq; tq = dwork->work.work_queue->taskqueue; taskqueue_enqueue(tq, &dwork->work.work_task); } /* * This function queues the given work structure on the given * workqueue. It returns non-zero if the work was successfully * [re-]queued. Else the work is already pending for completion.
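* As the state table below encodes, queuing a work item that is currently executing or being cancelled first tries to unblock the running executor; failing that, the task is enqueued again, so the callback is guaranteed to run once more.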
*/ bool linux_queue_work_on(int cpu __unused, struct workqueue_struct *wq, struct work_struct *work) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_TASK, /* start queuing task */ [WORK_ST_TIMER] = WORK_ST_TIMER, /* NOP */ [WORK_ST_TASK] = WORK_ST_TASK, /* NOP */ [WORK_ST_EXEC] = WORK_ST_TASK, /* queue task another time */ [WORK_ST_CANCEL] = WORK_ST_TASK, /* start queuing task again */ }; if (atomic_read(&wq->draining) != 0) return (!work_pending(work)); switch (linux_update_state(&work->state, states)) { case WORK_ST_EXEC: case WORK_ST_CANCEL: if (linux_work_exec_unblock(work) != 0) return (true); /* FALLTHROUGH */ case WORK_ST_IDLE: work->work_queue = wq; taskqueue_enqueue(wq->taskqueue, &work->work_task); return (true); default: return (false); /* already on a queue */ } } /* * Callback function for linux_queue_rcu_work */ static void rcu_work_func(struct rcu_head *rcu) { struct rcu_work *rwork; rwork = container_of(rcu, struct rcu_work, rcu); linux_queue_work_on(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); } /* * This function queues a work item after a grace period. * If the work was already pending it returns false, * if not it calls call_rcu and returns true. */ bool linux_queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) { if (!linux_work_pending(&rwork->work)) { rwork->wq = wq; linux_call_rcu(RCU_TYPE_REGULAR, &rwork->rcu, rcu_work_func); return (true); } return (false); } /* * This function waits for the last execution of a work and then * flushes the work. * It returns true if the work was pending and we waited; it returns * false otherwise. */ bool linux_flush_rcu_work(struct rcu_work *rwork) { if (linux_work_pending(&rwork->work)) { linux_rcu_barrier(RCU_TYPE_REGULAR); linux_flush_work(&rwork->work); return (true); } return (linux_flush_work(&rwork->work)); } /* * This function queues the given work structure on the given * workqueue after a given delay in ticks. It returns true if the * work was successfully [re-]queued. Else the work is already pending * for completion.
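* The delay is a tick (jiffy) count; with jiffies aliased to the long-sized ticksl, the unsigned long type matches Linux and avoids truncating large timeout values on 64-bit platforms.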
*/ bool linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, - struct delayed_work *dwork, unsigned delay) + struct delayed_work *dwork, unsigned long delay) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_TIMER, /* start timeout */ [WORK_ST_TIMER] = WORK_ST_TIMER, /* NOP */ [WORK_ST_TASK] = WORK_ST_TASK, /* NOP */ [WORK_ST_EXEC] = WORK_ST_TIMER, /* start timeout */ [WORK_ST_CANCEL] = WORK_ST_TIMER, /* start timeout */ }; bool res; if (atomic_read(&wq->draining) != 0) return (!work_pending(&dwork->work)); mtx_lock(&dwork->timer.mtx); switch (linux_update_state(&dwork->work.state, states)) { case WORK_ST_EXEC: case WORK_ST_CANCEL: if (delay == 0 && linux_work_exec_unblock(&dwork->work)) { dwork->timer.expires = jiffies; res = true; goto out; } /* FALLTHROUGH */ case WORK_ST_IDLE: dwork->work.work_queue = wq; dwork->timer.expires = jiffies + delay; if (delay == 0) { linux_delayed_work_enqueue(dwork); } else if (unlikely(cpu != WORK_CPU_UNBOUND)) { callout_reset_on(&dwork->timer.callout, delay, &linux_delayed_work_timer_fn, dwork, cpu); } else { callout_reset(&dwork->timer.callout, delay, &linux_delayed_work_timer_fn, dwork); } res = true; break; default: res = false; break; } out: mtx_unlock(&dwork->timer.mtx); return (res); } void linux_work_fn(void *context, int pending) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */ [WORK_ST_TIMER] = WORK_ST_EXEC, /* delayed work w/o timeout */ [WORK_ST_TASK] = WORK_ST_EXEC, /* call callback */ [WORK_ST_EXEC] = WORK_ST_IDLE, /* complete callback */ [WORK_ST_CANCEL] = WORK_ST_EXEC, /* failed to cancel */ }; struct work_struct *work; struct workqueue_struct *wq; struct work_exec exec; struct task_struct *task; task = current; /* setup local variables */ work = context; wq = work->work_queue; /* store target pointer */ exec.target = work; /* insert executor into list */ WQ_EXEC_LOCK(wq); TAILQ_INSERT_TAIL(&wq->exec_head, &exec, entry); while (1) { switch (linux_update_state(&work->state, states)) { case WORK_ST_TIMER: case WORK_ST_TASK: case WORK_ST_CANCEL: WQ_EXEC_UNLOCK(wq); /* set current work structure */ task->work = work; /* call work function */ work->func(work); /* set current work structure */ task->work = NULL; WQ_EXEC_LOCK(wq); /* check if unblocked */ if (exec.target != work) { /* reapply block */ exec.target = work; break; } /* FALLTHROUGH */ default: goto done; } } done: /* remove executor from list */ TAILQ_REMOVE(&wq->exec_head, &exec, entry); WQ_EXEC_UNLOCK(wq); } void linux_delayed_work_fn(void *context, int pending) { struct delayed_work *dwork = context; /* * Make sure the timer belonging to the delayed work gets * drained before invoking the work function. Else the timer * mutex may still be in use which can lead to use-after-free * situations, because the work function might free the work * structure before returning. 
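* Note that callout_drain() may sleep until a concurrently running timer callback has returned, which is what makes this safe.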
*/ callout_drain(&dwork->timer.callout); linux_work_fn(&dwork->work, pending); } static void linux_delayed_work_timer_fn(void *arg) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */ [WORK_ST_TIMER] = WORK_ST_TASK, /* start queueing task */ [WORK_ST_TASK] = WORK_ST_TASK, /* NOP */ [WORK_ST_EXEC] = WORK_ST_EXEC, /* NOP */ [WORK_ST_CANCEL] = WORK_ST_TASK, /* failed to cancel */ }; struct delayed_work *dwork = arg; switch (linux_update_state(&dwork->work.state, states)) { case WORK_ST_TIMER: case WORK_ST_CANCEL: linux_delayed_work_enqueue(dwork); break; default: break; } } /* * This function cancels the given work structure in a * non-blocking fashion. It returns non-zero if the work was * successfully cancelled. Else the work may still be busy or already * cancelled. */ bool linux_cancel_work(struct work_struct *work) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */ [WORK_ST_TIMER] = WORK_ST_TIMER, /* can't happen */ [WORK_ST_TASK] = WORK_ST_IDLE, /* cancel */ [WORK_ST_EXEC] = WORK_ST_EXEC, /* NOP */ [WORK_ST_CANCEL] = WORK_ST_IDLE, /* can't happen */ }; struct taskqueue *tq; MPASS(atomic_read(&work->state) != WORK_ST_TIMER); MPASS(atomic_read(&work->state) != WORK_ST_CANCEL); switch (linux_update_state(&work->state, states)) { case WORK_ST_TASK: tq = work->work_queue->taskqueue; if (taskqueue_cancel(tq, &work->work_task, NULL) == 0) return (true); /* FALLTHROUGH */ default: return (false); } } /* * This function cancels the given work structure in a synchronous * fashion. It returns non-zero if the work was successfully * cancelled. Else the work was already cancelled. */ bool linux_cancel_work_sync(struct work_struct *work) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */ [WORK_ST_TIMER] = WORK_ST_TIMER, /* can't happen */ [WORK_ST_TASK] = WORK_ST_IDLE, /* cancel and drain */ [WORK_ST_EXEC] = WORK_ST_IDLE, /* too late, drain */ [WORK_ST_CANCEL] = WORK_ST_IDLE, /* cancel and drain */ }; struct taskqueue *tq; bool retval = false; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "linux_cancel_work_sync() might sleep"); retry: switch (linux_update_state(&work->state, states)) { case WORK_ST_IDLE: case WORK_ST_TIMER: return (retval); case WORK_ST_EXEC: tq = work->work_queue->taskqueue; if (taskqueue_cancel(tq, &work->work_task, NULL) != 0) taskqueue_drain(tq, &work->work_task); goto retry; /* work may have restarted itself */ default: tq = work->work_queue->taskqueue; if (taskqueue_cancel(tq, &work->work_task, NULL) != 0) taskqueue_drain(tq, &work->work_task); retval = true; goto retry; } } /* * This function atomically stops the timer and callback. The timer * callback will not be called after this function returns. This * function returns true when the timeout was cancelled. Else the * timeout was not started or has already been called. */ static inline bool linux_cancel_timer(struct delayed_work *dwork, bool drain) { bool cancelled; mtx_lock(&dwork->timer.mtx); cancelled = (callout_stop(&dwork->timer.callout) == 1); mtx_unlock(&dwork->timer.mtx); /* check if we should drain */ if (drain) callout_drain(&dwork->timer.callout); return (cancelled); } /* * This function cancels the given delayed work structure in a * non-blocking fashion. It returns non-zero if the work was * successfully cancelled. Else the work may still be busy or already * cancelled.
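* Both the timer (callout) and the enqueued task are tried in turn; whichever cancellation succeeds resets the work state to idle.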
*/ bool linux_cancel_delayed_work(struct delayed_work *dwork) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */ [WORK_ST_TIMER] = WORK_ST_CANCEL, /* try to cancel */ [WORK_ST_TASK] = WORK_ST_CANCEL, /* try to cancel */ [WORK_ST_EXEC] = WORK_ST_EXEC, /* NOP */ [WORK_ST_CANCEL] = WORK_ST_CANCEL, /* NOP */ }; struct taskqueue *tq; bool cancelled; mtx_lock(&dwork->timer.mtx); switch (linux_update_state(&dwork->work.state, states)) { case WORK_ST_TIMER: case WORK_ST_CANCEL: cancelled = (callout_stop(&dwork->timer.callout) == 1); if (cancelled) { atomic_cmpxchg(&dwork->work.state, WORK_ST_CANCEL, WORK_ST_IDLE); mtx_unlock(&dwork->timer.mtx); return (true); } /* FALLTHROUGH */ case WORK_ST_TASK: tq = dwork->work.work_queue->taskqueue; if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) { atomic_cmpxchg(&dwork->work.state, WORK_ST_CANCEL, WORK_ST_IDLE); mtx_unlock(&dwork->timer.mtx); return (true); } /* FALLTHROUGH */ default: mtx_unlock(&dwork->timer.mtx); return (false); } } /* * This function cancels the given work structure in a synchronous * fashion. It returns true if the work was successfully * cancelled. Else the work was already cancelled. */ static bool linux_cancel_delayed_work_sync_int(struct delayed_work *dwork) { static const uint8_t states[WORK_ST_MAX] __aligned(8) = { [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */ [WORK_ST_TIMER] = WORK_ST_IDLE, /* cancel and drain */ [WORK_ST_TASK] = WORK_ST_IDLE, /* cancel and drain */ [WORK_ST_EXEC] = WORK_ST_IDLE, /* too late, drain */ [WORK_ST_CANCEL] = WORK_ST_IDLE, /* cancel and drain */ }; struct taskqueue *tq; int ret, state; bool cancelled; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "linux_cancel_delayed_work_sync() might sleep"); mtx_lock(&dwork->timer.mtx); state = linux_update_state(&dwork->work.state, states); switch (state) { case WORK_ST_IDLE: mtx_unlock(&dwork->timer.mtx); return (false); case WORK_ST_TIMER: case WORK_ST_CANCEL: cancelled = (callout_stop(&dwork->timer.callout) == 1); tq = dwork->work.work_queue->taskqueue; ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL); mtx_unlock(&dwork->timer.mtx); callout_drain(&dwork->timer.callout); taskqueue_drain(tq, &dwork->work.work_task); return (cancelled || (ret != 0)); default: tq = dwork->work.work_queue->taskqueue; ret = taskqueue_cancel(tq, &dwork->work.work_task, NULL); mtx_unlock(&dwork->timer.mtx); if (ret != 0) taskqueue_drain(tq, &dwork->work.work_task); return (ret != 0); } } bool linux_cancel_delayed_work_sync(struct delayed_work *dwork) { bool res; res = false; while (linux_cancel_delayed_work_sync_int(dwork)) res = true; return (res); } /* * This function waits until the given work structure is completed. * It returns non-zero if the work was successfully * waited for. Else the work was not waited for. */ bool linux_flush_work(struct work_struct *work) { struct taskqueue *tq; bool retval; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "linux_flush_work() might sleep"); switch (atomic_read(&work->state)) { case WORK_ST_IDLE: return (false); default: tq = work->work_queue->taskqueue; retval = taskqueue_poll_is_busy(tq, &work->work_task); taskqueue_drain(tq, &work->work_task); return (retval); } } /* * This function waits until the given delayed work structure is * completed. It returns non-zero if the work was successfully waited * for. Else the work was not waited for. 
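* A still-pending timer is stopped and the work enqueued immediately, so the taskqueue drain below covers that case as well.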
*/ bool linux_flush_delayed_work(struct delayed_work *dwork) { struct taskqueue *tq; bool retval; WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "linux_flush_delayed_work() might sleep"); switch (atomic_read(&dwork->work.state)) { case WORK_ST_IDLE: return (false); case WORK_ST_TIMER: if (linux_cancel_timer(dwork, 1)) linux_delayed_work_enqueue(dwork); /* FALLTHROUGH */ default: tq = dwork->work.work_queue->taskqueue; retval = taskqueue_poll_is_busy(tq, &dwork->work.work_task); taskqueue_drain(tq, &dwork->work.work_task); return (retval); } } /* * This function returns true if the given work is pending, and not * yet executing: */ bool linux_work_pending(struct work_struct *work) { switch (atomic_read(&work->state)) { case WORK_ST_TIMER: case WORK_ST_TASK: case WORK_ST_CANCEL: return (true); default: return (false); } } /* * This function returns true if the given work is busy. */ bool linux_work_busy(struct work_struct *work) { struct taskqueue *tq; switch (atomic_read(&work->state)) { case WORK_ST_IDLE: return (false); case WORK_ST_EXEC: tq = work->work_queue->taskqueue; return (taskqueue_poll_is_busy(tq, &work->work_task)); default: return (true); } } struct workqueue_struct * linux_create_workqueue_common(const char *name, int cpus) { struct workqueue_struct *wq; /* * If zero CPUs are specified use the default number of CPUs: */ if (cpus == 0) cpus = linux_default_wq_cpus; wq = kmalloc(sizeof(*wq), M_WAITOK | M_ZERO); wq->taskqueue = taskqueue_create(name, M_WAITOK, taskqueue_thread_enqueue, &wq->taskqueue); atomic_set(&wq->draining, 0); taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name); TAILQ_INIT(&wq->exec_head); mtx_init(&wq->exec_mtx, "linux_wq_exec", NULL, MTX_DEF); return (wq); } void linux_destroy_workqueue(struct workqueue_struct *wq) { atomic_inc(&wq->draining); drain_workqueue(wq); taskqueue_free(wq->taskqueue); mtx_destroy(&wq->exec_mtx); kfree(wq); } void linux_init_delayed_work(struct delayed_work *dwork, work_func_t func) { memset(dwork, 0, sizeof(*dwork)); dwork->work.func = func; TASK_INIT(&dwork->work.work_task, 0, linux_delayed_work_fn, dwork); mtx_init(&dwork->timer.mtx, spin_lock_name("lkpi-dwork"), NULL, MTX_DEF | MTX_NOWITNESS); callout_init_mtx(&dwork->timer.callout, &dwork->timer.mtx, 0); } struct work_struct * linux_current_work(void) { return (current->work); } static void linux_work_init(void *arg) { int max_wq_cpus = mp_ncpus + 1; /* avoid deadlock when there are too few threads */ if (max_wq_cpus < 4) max_wq_cpus = 4; /* set default number of CPUs */ linux_default_wq_cpus = max_wq_cpus; linux_system_short_wq = alloc_workqueue("linuxkpi_short_wq", 0, max_wq_cpus); linux_system_long_wq = alloc_workqueue("linuxkpi_long_wq", 0, max_wq_cpus); /* populate the workqueue pointers */ system_long_wq = linux_system_long_wq; system_wq = linux_system_short_wq; system_power_efficient_wq = linux_system_short_wq; system_unbound_wq = linux_system_short_wq; system_highpri_wq = linux_system_short_wq; } SYSINIT(linux_work_init, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_init, NULL); static void linux_work_uninit(void *arg) { destroy_workqueue(linux_system_short_wq); destroy_workqueue(linux_system_long_wq); /* clear workqueue pointers */ system_long_wq = NULL; system_wq = NULL; system_power_efficient_wq = NULL; system_unbound_wq = NULL; system_highpri_wq = NULL; } SYSUNINIT(linux_work_uninit, SI_SUB_TASKQ, SI_ORDER_THIRD, linux_work_uninit, NULL); void linux_irq_work_fn(void *context, int pending) { struct irq_work *irqw = context; irqw->func(irqw); } static void 
linux_irq_work_init_fn(void *context, int pending) { /* * LinuxKPI performs lazy allocation of memory structures required by * current on the first access to it. As some irq_work clients read * it with spinlock taken, we have to preallocate td_lkpi_task before * first call to irq_work_queue(). As irq_work uses a single thread, * it is enough to read current once at SYSINIT stage. */ if (current == NULL) panic("irq_work taskqueue is not initialized"); } static struct task linux_irq_work_init_task = TASK_INITIALIZER(0, linux_irq_work_init_fn, &linux_irq_work_init_task); static void linux_irq_work_init(void *arg) { linux_irq_work_tq = taskqueue_create_fast("linuxkpi_irq_wq", M_WAITOK, taskqueue_thread_enqueue, &linux_irq_work_tq); taskqueue_start_threads(&linux_irq_work_tq, 1, PWAIT, "linuxkpi_irq_wq"); taskqueue_enqueue(linux_irq_work_tq, &linux_irq_work_init_task); } SYSINIT(linux_irq_work_init, SI_SUB_TASKQ, SI_ORDER_SECOND, linux_irq_work_init, NULL); static void linux_irq_work_uninit(void *arg) { taskqueue_drain_all(linux_irq_work_tq); taskqueue_free(linux_irq_work_tq); } SYSUNINIT(linux_irq_work_uninit, SI_SUB_TASKQ, SI_ORDER_SECOND, linux_irq_work_uninit, NULL); diff --git a/sys/kern/subr_ticks.S b/sys/kern/subr_ticks.S index ad01d5d67165..5cb994293d91 100644 --- a/sys/kern/subr_ticks.S +++ b/sys/kern/subr_ticks.S @@ -1,36 +1,42 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2025 Mark Johnston */ /* * Define the "ticks" and "ticksl" variables. The former is overlaid onto the - * low bits of the latter. + * low bits of the latter. Also define an alias "jiffies" of "ticksl", + * used by the LinuxKPI. */ #if defined(__aarch64__) #include #include GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) #endif #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define TICKS_OFFSET 0 #else #define TICKS_OFFSET (__SIZEOF_LONG__ - __SIZEOF_INT__) #endif .bss .global ticksl .type ticksl, %object .balign __SIZEOF_LONG__ ticksl: .zero __SIZEOF_LONG__ .size ticksl, __SIZEOF_LONG__ .global ticks .type ticks, %object ticks = ticksl + TICKS_OFFSET .size ticks, __SIZEOF_INT__ + + .global jiffies + .type jiffies, %object +jiffies = ticksl + .size jiffies, __SIZEOF_LONG__
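For illustration only, not part of the diffs above: a minimal consumer-side sketch of the widened timeout type. The example_wait() function, its two-second value, and the surrounding context are hypothetical; schedule_timeout(), msecs_to_jiffies(), set_current_state() and TASK_INTERRUPTIBLE are existing LinuxKPI interfaces mapped onto the code changed above.

#include <linux/jiffies.h>
#include <linux/sched.h>

/* Hypothetical helper: sleep for up to two seconds' worth of jiffies. */
static int
example_wait(void)
{
	long remaining;

	/* Mark the thread as sleeping; linux_schedule_timeout() reads this state. */
	set_current_state(TASK_INTERRUPTIBLE);

	/* The long-typed return value carries the unexpired jiffies, if any. */
	remaining = schedule_timeout(msecs_to_jiffies(2000));
	if (remaining > 0)
		return (1);	/* woken up early; 'remaining' jiffies were left */
	return (0);		/* the full timeout elapsed */
}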