diff --git a/sys/compat/linuxkpi/common/include/linux/skbuff.h b/sys/compat/linuxkpi/common/include/linux/skbuff.h index ee3f427aa6e9..d3839820d3d5 100644 --- a/sys/compat/linuxkpi/common/include/linux/skbuff.h +++ b/sys/compat/linuxkpi/common/include/linux/skbuff.h @@ -1,1095 +1,1103 @@ /*- * Copyright (c) 2020-2023 The FreeBSD Foundation * Copyright (c) 2021-2023 Bjoern A. Zeeb * * This software was developed by Björn Zeeb under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL. * Do not rely on the internals of this implementation. They are highly * likely to change as we will improve the integration to FreeBSD mbufs. 
*/ #ifndef _LINUXKPI_LINUX_SKBUFF_H #define _LINUXKPI_LINUX_SKBUFF_H #include #include #include #include #include #include #include #include #include +#include "opt_wlan.h" + +/* Currently this is only used for wlan so we can depend on that. */ +#if defined(IEEE80211_DEBUG) && !defined(SKB_DEBUG) +#define SKB_DEBUG +#endif + /* #define SKB_DEBUG */ + #ifdef SKB_DEBUG #define DSKB_TODO 0x01 #define DSKB_IMPROVE 0x02 #define DSKB_TRACE 0x10 #define DSKB_TRACEX 0x20 extern int linuxkpi_debug_skb; #define SKB_TODO() \ if (linuxkpi_debug_skb & DSKB_TODO) \ printf("SKB_TODO %s:%d\n", __func__, __LINE__) #define SKB_IMPROVE(...) \ if (linuxkpi_debug_skb & DSKB_IMPROVE) \ printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__) #define SKB_TRACE(_s) \ if (linuxkpi_debug_skb & DSKB_TRACE) \ printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s) #define SKB_TRACE2(_s, _p) \ if (linuxkpi_debug_skb & DSKB_TRACE) \ printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__, _s, _p) #define SKB_TRACE_FMT(_s, _fmt, ...) \ if (linuxkpi_debug_skb & DSKB_TRACE) \ printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__, __LINE__, _s, \ __VA_ARGS__) #else #define SKB_TODO() do { } while(0) #define SKB_IMPROVE(...) do { } while(0) #define SKB_TRACE(_s) do { } while(0) #define SKB_TRACE2(_s, _p) do { } while(0) #define SKB_TRACE_FMT(_s, ...) 
do { } while(0) #endif enum sk_buff_pkt_type { PACKET_BROADCAST, PACKET_MULTICAST, PACKET_OTHERHOST, }; struct skb_shared_hwtstamps { ktime_t hwtstamp; }; #define NET_SKB_PAD max(CACHE_LINE_SIZE, 32) #define SKB_DATA_ALIGN(_x) roundup2(_x, CACHE_LINE_SIZE) struct sk_buff_head { /* XXX TODO */ union { struct { struct sk_buff *next; struct sk_buff *prev; }; struct sk_buff_head_l { struct sk_buff *next; struct sk_buff *prev; } list; }; size_t qlen; spinlock_t lock; }; enum sk_checksum_flags { CHECKSUM_NONE = 0x00, CHECKSUM_UNNECESSARY = 0x01, CHECKSUM_PARTIAL = 0x02, CHECKSUM_COMPLETE = 0x04, }; struct skb_frag { /* XXX TODO */ struct page *page; /* XXX-BZ These three are a wild guess so far! */ off_t offset; size_t size; }; typedef struct skb_frag skb_frag_t; enum skb_shared_info_gso_type { SKB_GSO_TCPV4, SKB_GSO_TCPV6, }; struct skb_shared_info { enum skb_shared_info_gso_type gso_type; uint16_t gso_size; uint16_t nr_frags; struct sk_buff *frag_list; skb_frag_t frags[64]; /* XXX TODO, 16xpage? */ }; struct sk_buff { /* XXX TODO */ union { /* struct sk_buff_head */ struct { struct sk_buff *next; struct sk_buff *prev; }; struct list_head list; }; uint32_t _alloc_len; /* Length of alloc data-buf. XXX-BZ give up for truesize? */ uint32_t len; /* ? */ uint32_t data_len; /* ? If we have frags? */ uint32_t truesize; /* The total size of all buffers, incl. frags. */ uint16_t mac_len; /* Link-layer header length. */ __sum16 csum; uint16_t l3hdroff; /* network header offset from *head */ uint16_t l4hdroff; /* transport header offset from *head */ uint32_t priority; uint16_t qmap; /* queue mapping */ uint16_t _flags; /* Internal flags. */ #define _SKB_FLAGS_SKBEXTFRAG 0x0001 enum sk_buff_pkt_type pkt_type; uint16_t mac_header; /* offset of mac_header */ /* "Scratch" area for layers to store metadata. */ /* ??? I see sizeof() operations so probably an array. */ uint8_t cb[64] __aligned(CACHE_LINE_SIZE); struct net_device *dev; void *sk; /* XXX net/sock.h? 
*/ int csum_offset, csum_start, ip_summed, protocol; uint8_t *head; /* Head of buffer. */ uint8_t *data; /* Head of data. */ uint8_t *tail; /* End of data. */ uint8_t *end; /* End of buffer. */ struct skb_shared_info *shinfo; /* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */ void *m; void(*m_free_func)(void *); /* Force padding to CACHE_LINE_SIZE. */ uint8_t __scratch[0] __aligned(CACHE_LINE_SIZE); }; /* -------------------------------------------------------------------------- */ struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t); struct sk_buff *linuxkpi_dev_alloc_skb(size_t, gfp_t); struct sk_buff *linuxkpi_build_skb(void *, size_t); void linuxkpi_kfree_skb(struct sk_buff *); struct sk_buff *linuxkpi_skb_copy(struct sk_buff *, gfp_t); /* -------------------------------------------------------------------------- */ static inline struct sk_buff * alloc_skb(size_t size, gfp_t gfp) { struct sk_buff *skb; skb = linuxkpi_alloc_skb(size, gfp); SKB_TRACE(skb); return (skb); } static inline struct sk_buff * __dev_alloc_skb(size_t len, gfp_t gfp) { struct sk_buff *skb; skb = linuxkpi_dev_alloc_skb(len, gfp); SKB_IMPROVE(); SKB_TRACE(skb); return (skb); } static inline struct sk_buff * dev_alloc_skb(size_t len) { struct sk_buff *skb; skb = __dev_alloc_skb(len, GFP_NOWAIT); SKB_IMPROVE(); SKB_TRACE(skb); return (skb); } static inline void kfree_skb(struct sk_buff *skb) { SKB_TRACE(skb); linuxkpi_kfree_skb(skb); } static inline void dev_kfree_skb(struct sk_buff *skb) { SKB_TRACE(skb); kfree_skb(skb); } static inline void dev_kfree_skb_any(struct sk_buff *skb) { SKB_TRACE(skb); dev_kfree_skb(skb); } static inline void dev_kfree_skb_irq(struct sk_buff *skb) { SKB_TRACE(skb); SKB_IMPROVE("Do we have to defer this?"); dev_kfree_skb(skb); } static inline struct sk_buff * build_skb(void *data, unsigned int fragsz) { struct sk_buff *skb; skb = linuxkpi_build_skb(data, fragsz); SKB_TRACE(skb); return (skb); } /* 
-------------------------------------------------------------------------- */ /* XXX BZ review this one for terminal condition as Linux "queues" are special. */ #define skb_list_walk_safe(_q, skb, tmp) \ for ((skb) = (_q)->next; (skb) != NULL && ((tmp) = (skb)->next); (skb) = (tmp)) /* Add headroom; cannot do once there is data in there. */ static inline void skb_reserve(struct sk_buff *skb, size_t len) { SKB_TRACE(skb); #if 0 /* Apparently it is allowed to call skb_reserve multiple times in a row. */ KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p " "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail)); #else KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not " "empty head %p data %p tail %p len %u\n", __func__, skb, skb->head, skb->data, skb->tail, skb->len)); #endif skb->data += len; skb->tail += len; } /* * Remove headroom; return new data pointer; basically make space at the * front to copy data in (manually). */ static inline void * __skb_push(struct sk_buff *skb, size_t len) { SKB_TRACE(skb); KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - " "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->data)); skb->len += len; skb->data -= len; return (skb->data); } static inline void * skb_push(struct sk_buff *skb, size_t len) { SKB_TRACE(skb); return (__skb_push(skb, len)); } /* * Length of the data on the skb (without any frags)??? */ static inline size_t skb_headlen(struct sk_buff *skb) { SKB_TRACE(skb); return (skb->len - skb->data_len); } /* Return the end of data (tail pointer). */ static inline uint8_t * skb_tail_pointer(struct sk_buff *skb) { SKB_TRACE(skb); return (skb->tail); } /* Return number of bytes available at end of buffer. 
*/ static inline unsigned int skb_tailroom(struct sk_buff *skb) { SKB_TRACE(skb); KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, " "end %p tail %p\n", __func__, skb, skb->end, skb->tail)); return (skb->end - skb->tail); } /* Return numer of bytes available at the beginning of buffer. */ static inline unsigned int skb_headroom(struct sk_buff *skb) { SKB_TRACE(skb); KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, " "data %p head %p\n", __func__, skb, skb->data, skb->head)); return (skb->data - skb->head); } /* * Remove tailroom; return the old tail pointer; basically make space at * the end to copy data in (manually). See also skb_put_data() below. */ static inline void * __skb_put(struct sk_buff *skb, size_t len) { void *s; SKB_TRACE(skb); KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + " "len %zu) > end %p, head %p data %p len %u\n", __func__, skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len)); s = skb_tail_pointer(skb); if (len == 0) return (s); skb->tail += len; skb->len += len; #ifdef SKB_DEBUG if (linuxkpi_debug_skb & DSKB_TRACEX) printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n", __func__, skb, skb->len, skb->head, skb->data, skb->tail, skb->end, s, len); #endif return (s); } static inline void * skb_put(struct sk_buff *skb, size_t len) { SKB_TRACE(skb); return (__skb_put(skb, len)); } /* skb_put() + copying data in. */ static inline void * skb_put_data(struct sk_buff *skb, const void *buf, size_t len) { void *s; SKB_TRACE2(skb, buf); s = skb_put(skb, len); if (len == 0) return (s); memcpy(s, buf, len); return (s); } /* skb_put() + filling with zeros. */ static inline void * skb_put_zero(struct sk_buff *skb, size_t len) { void *s; SKB_TRACE(skb); s = skb_put(skb, len); memset(s, '\0', len); return (s); } /* * Remove len bytes from beginning of data. 
* * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic; * we return the advanced data pointer so we don't have to keep a temp, correct? */ static inline void * skb_pull(struct sk_buff *skb, size_t len) { SKB_TRACE(skb); #if 0 /* Apparently this doesn't barf... */ KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n", __func__, skb, skb->len, len, skb->data)); #endif if (skb->len < len) return (NULL); skb->len -= len; skb->data += len; return (skb->data); } /* Reduce skb data to given length or do nothing if smaller already. */ static inline void __skb_trim(struct sk_buff *skb, unsigned int len) { SKB_TRACE(skb); if (skb->len < len) return; skb->len = len; skb->tail = skb->data + skb->len; } static inline void skb_trim(struct sk_buff *skb, unsigned int len) { return (__skb_trim(skb, len)); } static inline struct skb_shared_info * skb_shinfo(struct sk_buff *skb) { SKB_TRACE(skb); return (skb->shinfo); } static inline void skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page, off_t offset, size_t size, unsigned int truesize) { struct skb_shared_info *shinfo; SKB_TRACE(skb); #ifdef SKB_DEBUG if (linuxkpi_debug_skb & DSKB_TRACEX) printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d " "page %#jx offset %ju size %zu truesize %u\n", __func__, skb, skb->head, skb->data, skb->tail, skb->end, skb->len, fragno, (uintmax_t)(uintptr_t)linux_page_address(page), (uintmax_t)offset, size, truesize); #endif shinfo = skb_shinfo(skb); KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p " "fragno %d too big\n", __func__, skb, fragno)); shinfo->frags[fragno].page = page; shinfo->frags[fragno].offset = offset; shinfo->frags[fragno].size = size; shinfo->nr_frags = fragno + 1; skb->len += size; skb->data_len += size; skb->truesize += truesize; /* XXX TODO EXTEND truesize? 
*/ } /* -------------------------------------------------------------------------- */ /* XXX BZ review this one for terminal condition as Linux "queues" are special. */ #define skb_queue_walk(_q, skb) \ for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q); \ (skb) = (skb)->next) #define skb_queue_walk_safe(_q, skb, tmp) \ for ((skb) = (_q)->next, (tmp) = (skb)->next; \ (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next) static inline bool skb_queue_empty(struct sk_buff_head *q) { SKB_TRACE(q); return (q->qlen == 0); } static inline void __skb_queue_head_init(struct sk_buff_head *q) { SKB_TRACE(q); q->prev = q->next = (struct sk_buff *)q; q->qlen = 0; } static inline void skb_queue_head_init(struct sk_buff_head *q) { SKB_TRACE(q); return (__skb_queue_head_init(q)); } static inline void __skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next, struct sk_buff_head *q) { SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q); new->prev = prev; new->next = next; ((struct sk_buff_head_l *)next)->prev = new; ((struct sk_buff_head_l *)prev)->next = new; q->qlen++; } static inline void __skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb, struct sk_buff *new) { SKB_TRACE_FMT(q, "skb %p new %p", skb, new); __skb_insert(new, skb, ((struct sk_buff_head_l *)skb)->next, q); } static inline void __skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb, struct sk_buff *new) { SKB_TRACE_FMT(q, "skb %p new %p", skb, new); __skb_insert(new, skb->prev, skb, q); } static inline void __skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new) { SKB_TRACE2(q, new); __skb_queue_before(q, (struct sk_buff *)q, new); } static inline void skb_queue_tail(struct sk_buff_head *q, struct sk_buff *new) { SKB_TRACE2(q, new); return (__skb_queue_tail(q, new)); } static inline struct sk_buff * skb_peek(struct sk_buff_head *q) { struct sk_buff *skb; skb = q->next; SKB_TRACE2(q, skb); if (skb == (struct sk_buff *)q) return (NULL); return 
(skb); } static inline struct sk_buff * skb_peek_tail(struct sk_buff_head *q) { struct sk_buff *skb; skb = q->prev; SKB_TRACE2(q, skb); if (skb == (struct sk_buff *)q) return (NULL); return (skb); } static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *head) { SKB_TRACE2(skb, head); struct sk_buff *p, *n;; head->qlen--; p = skb->prev; n = skb->next; p->next = n; n->prev = p; skb->prev = skb->next = NULL; } static inline void skb_unlink(struct sk_buff *skb, struct sk_buff_head *head) { SKB_TRACE2(skb, head); return (__skb_unlink(skb, head)); } static inline struct sk_buff * __skb_dequeue(struct sk_buff_head *q) { struct sk_buff *skb; SKB_TRACE(q); skb = q->next; if (skb == (struct sk_buff *)q) return (NULL); if (skb != NULL) __skb_unlink(skb, q); SKB_TRACE(skb); return (skb); } static inline struct sk_buff * skb_dequeue(struct sk_buff_head *q) { SKB_TRACE(q); return (__skb_dequeue(q)); } static inline struct sk_buff * skb_dequeue_tail(struct sk_buff_head *q) { struct sk_buff *skb; skb = skb_peek_tail(q); if (skb != NULL) __skb_unlink(skb, q); SKB_TRACE2(q, skb); return (skb); } static inline void __skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb) { SKB_TRACE2(q, skb); __skb_queue_after(q, (struct sk_buff *)q, skb); } static inline void skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb) { SKB_TRACE2(q, skb); __skb_queue_after(q, (struct sk_buff *)q, skb); } static inline uint32_t skb_queue_len(struct sk_buff_head *head) { SKB_TRACE(head); return (head->qlen); } static inline uint32_t skb_queue_len_lockless(const struct sk_buff_head *head) { SKB_TRACE(head); return (READ_ONCE(head->qlen)); } static inline void __skb_queue_purge(struct sk_buff_head *q) { struct sk_buff *skb; SKB_TRACE(q); while ((skb = __skb_dequeue(q)) != NULL) kfree_skb(skb); } static inline void skb_queue_purge(struct sk_buff_head *q) { SKB_TRACE(q); return (__skb_queue_purge(q)); } static inline struct sk_buff * skb_queue_prev(struct sk_buff_head *q, struct 
sk_buff *skb) { SKB_TRACE2(q, skb); /* XXX what is the q argument good for? */ return (skb->prev); } /* -------------------------------------------------------------------------- */ static inline struct sk_buff * skb_copy(struct sk_buff *skb, gfp_t gfp) { struct sk_buff *new; new = linuxkpi_skb_copy(skb, gfp); SKB_TRACE2(skb, new); return (new); } static inline void consume_skb(struct sk_buff *skb) { SKB_TRACE(skb); SKB_TODO(); } static inline uint16_t skb_checksum(struct sk_buff *skb, int offs, size_t len, int x) { SKB_TRACE(skb); SKB_TODO(); return (0xffff); } static inline int skb_checksum_start_offset(struct sk_buff *skb) { SKB_TRACE(skb); SKB_TODO(); return (-1); } static inline dma_addr_t skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x, size_t fragsz, enum dma_data_direction dir) { SKB_TRACE2(frag, dev); SKB_TODO(); return (-1); } static inline size_t skb_frag_size(const skb_frag_t *frag) { SKB_TRACE(frag); SKB_TODO(); return (-1); } #define skb_walk_frags(_skb, _frag) \ for ((_frag) = (_skb); false; (_frag)++) static inline void skb_checksum_help(struct sk_buff *skb) { SKB_TRACE(skb); SKB_TODO(); } static inline bool skb_ensure_writable(struct sk_buff *skb, size_t off) { SKB_TRACE(skb); SKB_TODO(); return (false); } static inline void * skb_frag_address(const skb_frag_t *frag) { SKB_TRACE(frag); SKB_TODO(); return (NULL); } static inline void skb_free_frag(void *frag) { page_frag_free(frag); } static inline struct sk_buff * skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags) { SKB_TRACE(skb); SKB_TODO(); return (NULL); } static inline bool skb_is_gso(struct sk_buff *skb) { SKB_TRACE(skb); SKB_IMPROVE("Really a TODO but get it away from logging"); return (false); } static inline void skb_mark_not_on_list(struct sk_buff *skb) { SKB_TRACE(skb); SKB_TODO(); } static inline void ___skb_queue_splice(const struct sk_buff_head *from, struct sk_buff *p, struct sk_buff *n) { struct sk_buff *b, *e; b = from->next; e = from->prev; 
b->prev = p; ((struct sk_buff_head_l *)p)->next = b; e->next = n; ((struct sk_buff_head_l *)n)->prev = e; } static inline void skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to) { SKB_TRACE2(from, to); if (skb_queue_empty(from)) return; ___skb_queue_splice(from, (struct sk_buff *)to, to->next); to->qlen += from->qlen; __skb_queue_head_init(from); } static inline void skb_queue_splice_tail_init(struct sk_buff_head *from, struct sk_buff_head *to) { SKB_TRACE2(from, to); if (skb_queue_empty(from)) return; ___skb_queue_splice(from, to->prev, (struct sk_buff *)to); to->qlen += from->qlen; __skb_queue_head_init(from); } static inline void skb_reset_transport_header(struct sk_buff *skb) { SKB_TRACE(skb); skb->l4hdroff = skb->data - skb->head; } static inline uint8_t * skb_transport_header(struct sk_buff *skb) { SKB_TRACE(skb); return (skb->head + skb->l4hdroff); } static inline uint8_t * skb_network_header(struct sk_buff *skb) { SKB_TRACE(skb); return (skb->head + skb->l3hdroff); } static inline bool skb_is_nonlinear(struct sk_buff *skb) { SKB_TRACE(skb); return ((skb->data_len > 0) ? true : false); } static inline int __skb_linearize(struct sk_buff *skb) { SKB_TRACE(skb); SKB_TODO(); return (ENXIO); } static inline int skb_linearize(struct sk_buff *skb) { return (skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0); } static inline int pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp) { SKB_TRACE(skb); SKB_TODO(); return (-ENXIO); } /* Not really seen this one but need it as symmetric accessor function. 
*/ static inline void skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap) { SKB_TRACE_FMT(skb, "qmap %u", qmap); skb->qmap = qmap; } static inline uint16_t skb_get_queue_mapping(struct sk_buff *skb) { SKB_TRACE_FMT(skb, "qmap %u", skb->qmap); return (skb->qmap); } static inline bool skb_header_cloned(struct sk_buff *skb) { SKB_TRACE(skb); SKB_TODO(); return (false); } static inline uint8_t * skb_mac_header(const struct sk_buff *skb) { SKB_TRACE(skb); return (skb->head + skb->mac_header); } static inline void skb_reset_mac_header(struct sk_buff *skb) { SKB_TRACE(skb); skb->mac_header = skb->data - skb->head; } static inline void skb_set_mac_header(struct sk_buff *skb, const size_t len) { SKB_TRACE(skb); skb_reset_mac_header(skb); skb->mac_header += len; } static inline struct skb_shared_hwtstamps * skb_hwtstamps(struct sk_buff *skb) { SKB_TRACE(skb); SKB_TODO(); return (NULL); } static inline void skb_orphan(struct sk_buff *skb) { SKB_TRACE(skb); SKB_TODO(); } static inline __sum16 csum_unfold(__sum16 sum) { SKB_TODO(); return (sum); } static __inline void skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len) { SKB_TODO(); } static inline void skb_reset_tail_pointer(struct sk_buff *skb) { SKB_TRACE(skb); #ifdef SKB_DOING_OFFSETS_US_NOT skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head); #endif skb->tail = skb->data; SKB_TRACE(skb); } static inline struct sk_buff * skb_get(struct sk_buff *skb) { SKB_TODO(); /* XXX refcnt? as in get/put_device? */ return (skb); } static inline struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) { SKB_TODO(); return (NULL); } static inline void skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len) { SKB_TRACE(skb); /* Let us just hope the destination has len space ... 
*/ memcpy(dst, skb->data, len); } static inline int skb_pad(struct sk_buff *skb, int pad) { SKB_TRACE(skb); SKB_TODO(); return (-1); } static inline void skb_list_del_init(struct sk_buff *skb) { SKB_TRACE(skb); SKB_TODO(); } static inline void napi_consume_skb(struct sk_buff *skb, int budget) { SKB_TRACE(skb); SKB_TODO(); } static inline struct sk_buff * napi_build_skb(void *data, size_t len) { SKB_TODO(); return (NULL); } static inline uint32_t skb_get_hash(struct sk_buff *skb) { SKB_TRACE(skb); SKB_TODO(); return (0); } static inline void skb_mark_for_recycle(struct sk_buff *skb) { SKB_TRACE(skb); SKB_TODO(); } static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) { SKB_TRACE(skb); SKB_TODO(); return (-1); } #define SKB_WITH_OVERHEAD(_s) \ (_s) - ALIGN(sizeof(struct skb_shared_info), CACHE_LINE_SIZE) #endif /* _LINUXKPI_LINUX_SKBUFF_H */ diff --git a/sys/compat/linuxkpi/common/src/linux_80211.h b/sys/compat/linuxkpi/common/src/linux_80211.h index a48cf719c693..80be87ebe231 100644 --- a/sys/compat/linuxkpi/common/src/linux_80211.h +++ b/sys/compat/linuxkpi/common/src/linux_80211.h @@ -1,422 +1,428 @@ /*- * Copyright (c) 2020-2023 The FreeBSD Foundation * Copyright (c) 2020-2021 Bjoern A. Zeeb * * This software was developed by Björn Zeeb under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Public functions are called linuxkpi_*(). * Internal (static) functions are called lkpi_*(). * * The internal structures holding metadata over public structures are also * called lkpi_xxx (usually with a member at the end called xxx). * Note: we do not replicate the structure names but the general variable names * for these (e.g., struct hw -> struct lkpi_hw, struct sta -> struct lkpi_sta). * There are macros to access one from the other. * We call the internal versions lxxx (e.g., hw -> lhw, sta -> lsta). 
*/ #ifndef _LKPI_SRC_LINUX_80211_H #define _LKPI_SRC_LINUX_80211_H +#include "opt_wlan.h" + +#if defined(IEEE80211_DEBUG) && !defined(LINUXKPI_DEBUG_80211) +#define LINUXKPI_DEBUG_80211 +#endif + /* #define LINUXKPI_DEBUG_80211 */ #ifndef D80211_TODO #define D80211_TODO 0x00000001 #endif #ifndef D80211_IMPROVE #define D80211_IMPROVE 0x00000002 #endif #define D80211_IMPROVE_TXQ 0x00000004 #define D80211_TRACE 0x00000010 #define D80211_TRACEOK 0x00000020 #define D80211_TRACE_TX 0x00000100 #define D80211_TRACE_TX_DUMP 0x00000200 #define D80211_TRACE_RX 0x00001000 #define D80211_TRACE_RX_DUMP 0x00002000 #define D80211_TRACE_RX_BEACONS 0x00004000 #define D80211_TRACEX (D80211_TRACE_TX|D80211_TRACE_RX) #define D80211_TRACEX_DUMP (D80211_TRACE_TX_DUMP|D80211_TRACE_RX_DUMP) #define D80211_TRACE_STA 0x00010000 #define D80211_TRACE_MO 0x00100000 #define D80211_TRACE_MODE 0x0f000000 #define D80211_TRACE_MODE_HT 0x01000000 #define D80211_TRACE_MODE_VHT 0x02000000 #define D80211_TRACE_MODE_HE 0x04000000 #define D80211_TRACE_MODE_EHT 0x08000000 #define IMPROVE_TXQ(...) \ if (linuxkpi_debug_80211 & D80211_IMPROVE_TXQ) \ printf("%s:%d: XXX LKPI80211 IMPROVE_TXQ\n", __func__, __LINE__) #define IMPROVE_HT(...) \ if (linuxkpi_debug_80211 & D80211_TRACE_MODE_HT) \ printf("%s:%d: XXX LKPI80211 IMPROVE_HT\n", __func__, __LINE__) #define MTAG_ABI_LKPI80211 1707696513 /* LinuxKPI 802.11 KBI */ /* * Deferred RX path. * We need to pass *ni along (and possibly more in the future so * we use a struct right from the start. */ #define LKPI80211_TAG_RXNI 0 /* deferred RX path */ struct lkpi_80211_tag_rxni { struct ieee80211_node *ni; /* MUST hold a reference to it. 
*/ }; struct lkpi_radiotap_tx_hdr { struct ieee80211_radiotap_header wt_ihdr; uint8_t wt_flags; uint8_t wt_rate; uint16_t wt_chan_freq; uint16_t wt_chan_flags; } __packed; #define LKPI_RTAP_TX_FLAGS_PRESENT \ ((1 << IEEE80211_RADIOTAP_FLAGS) | \ (1 << IEEE80211_RADIOTAP_RATE) | \ (1 << IEEE80211_RADIOTAP_CHANNEL)) struct lkpi_radiotap_rx_hdr { struct ieee80211_radiotap_header wr_ihdr; uint64_t wr_tsft; uint8_t wr_flags; uint8_t wr_rate; uint16_t wr_chan_freq; uint16_t wr_chan_flags; int8_t wr_dbm_antsignal; int8_t wr_dbm_antnoise; } __packed __aligned(8); #define LKPI_RTAP_RX_FLAGS_PRESENT \ ((1 << IEEE80211_RADIOTAP_TSFT) | \ (1 << IEEE80211_RADIOTAP_FLAGS) | \ (1 << IEEE80211_RADIOTAP_RATE) | \ (1 << IEEE80211_RADIOTAP_CHANNEL) | \ (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \ (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE)) struct lkpi_txq { TAILQ_ENTRY(lkpi_txq) txq_entry; struct mtx ltxq_mtx; bool seen_dequeue; bool stopped; uint32_t txq_generation; struct sk_buff_head skbq; /* Must be last! */ struct ieee80211_txq txq __aligned(CACHE_LINE_SIZE); }; #define TXQ_TO_LTXQ(_txq) container_of(_txq, struct lkpi_txq, txq) struct lkpi_sta { TAILQ_ENTRY(lkpi_sta) lsta_entry; struct ieee80211_node *ni; /* Deferred TX path. */ /* Eventually we might want to migrate this into net80211 entirely. */ /* XXX-BZ can we use sta->txq[] instead directly? */ struct task txq_task; struct mbufq txq; struct mtx txq_mtx; struct ieee80211_key_conf *kc; enum ieee80211_sta_state state; bool txq_ready; /* Can we run the taskq? */ bool added_to_drv; /* Driver knows; i.e. we called ...(). */ bool in_mgd; /* XXX-BZ should this be per-vif? */ /* Must be last! */ struct ieee80211_sta sta __aligned(CACHE_LINE_SIZE); }; #define STA_TO_LSTA(_sta) container_of(_sta, struct lkpi_sta, sta) #define LSTA_TO_STA(_lsta) (&(_lsta)->sta) struct lkpi_vif { TAILQ_ENTRY(lkpi_vif) lvif_entry; struct ieee80211vap iv_vap; struct mtx mtx; struct wireless_dev wdev; /* Other local stuff. 
*/
	/* Saved net80211 vap methods we interpose on (tail of struct lkpi_vif;
	 * the head of the struct is declared above). */
	int (*iv_newstate)(struct ieee80211vap *, enum ieee80211_state, int);
	struct ieee80211_node * (*iv_update_bss)(struct ieee80211vap *,
	    struct ieee80211_node *);

	TAILQ_HEAD(, lkpi_sta)		lsta_head;
	struct lkpi_sta			*lvif_bss;

	bool				lvif_bss_synched;
	bool				added_to_drv;	/* Driver knows; i.e. we called add_interface(). */

	bool				hw_queue_stopped[IEEE80211_NUM_ACS];

	/* Must be last! */
	struct ieee80211_vif		vif __aligned(CACHE_LINE_SIZE);
};

/* Converters between the net80211 vap, the LinuxKPI wrapper, and the
 * embedded Linux vif; all based on container_of()/member access. */
#define	VAP_TO_LVIF(_vap)	container_of(_vap, struct lkpi_vif, iv_vap)
#define	LVIF_TO_VAP(_lvif)	(&(_lvif)->iv_vap)
#define	VIF_TO_LVIF(_vif)	container_of(_vif, struct lkpi_vif, vif)
#define	LVIF_TO_VIF(_lvif)	(&(_lvif)->vif)

struct lkpi_hw {	/* name it mac80211_sc? */
	const struct ieee80211_ops	*ops;
	struct ieee80211_scan_request	*hw_req;
	struct workqueue_struct		*workq;

	/* FreeBSD specific compat. */
	/* Linux device is in hw.wiphy->dev after SET_IEEE80211_DEV(). */
	struct ieee80211com		*ic;

	struct lkpi_radiotap_tx_hdr	rtap_tx;
	struct lkpi_radiotap_rx_hdr	rtap_rx;

	TAILQ_HEAD(, lkpi_vif)		lvif_head;
	struct sx			lvif_sx;

	struct sx			sx;

	struct mtx			txq_mtx;
	uint32_t			txq_generation[IEEE80211_NUM_ACS];
	TAILQ_HEAD(, lkpi_txq)		scheduled_txqs[IEEE80211_NUM_ACS];

	/* Deferred RX path. */
	struct task			rxq_task;
	struct mbufq			rxq;
	struct mtx			rxq_mtx;

	/* Scan functions we overload to handle depending on scan mode. */
	void	(*ic_scan_curchan)(struct ieee80211_scan_state *,
		    unsigned long);
	void	(*ic_scan_mindwell)(struct ieee80211_scan_state *);

	/* Node functions we overload to sync state. */
	struct ieee80211_node *	(*ic_node_alloc)(struct ieee80211vap *,
				    const uint8_t [IEEE80211_ADDR_LEN]);
	int			(*ic_node_init)(struct ieee80211_node *);
	void			(*ic_node_cleanup)(struct ieee80211_node *);
	void			(*ic_node_free)(struct ieee80211_node *);

	/* HT and later functions. */
	int	(*ic_recv_action)(struct ieee80211_node *,
		    const struct ieee80211_frame *, const uint8_t *,
		    const uint8_t *);
	int	(*ic_send_action)(struct ieee80211_node *, int, int, void *);
	int	(*ic_ampdu_enable)(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *);
	int	(*ic_addba_request)(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *, int, int, int);
	int	(*ic_addba_response)(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *, int, int, int);
	void	(*ic_addba_stop)(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *);
	void	(*ic_addba_response_timeout)(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *);
	void	(*ic_bar_response)(struct ieee80211_node *,
		    struct ieee80211_tx_ampdu *, int);
	int	(*ic_ampdu_rx_start)(struct ieee80211_node *,
		    struct ieee80211_rx_ampdu *, int, int, int);
	void	(*ic_ampdu_rx_stop)(struct ieee80211_node *,
		    struct ieee80211_rx_ampdu *);

#define	LKPI_MAC80211_DRV_STARTED	0x00000001
	uint32_t			sc_flags;
#define	LKPI_LHW_SCAN_RUNNING		0x00000001
#define	LKPI_LHW_SCAN_HW		0x00000002
	uint32_t			scan_flags;
	struct mtx			scan_mtx;

	int				supbands;	/* Number of supported bands. */
	int				max_rates;	/* Maximum number of bitrates supported in any channel. */
	int				scan_ie_len;	/* Length of common per-band scan IEs. */

	bool				update_mc;
	bool				update_wme;
	bool				rxq_stopped;

	/* Must be last! */
	struct ieee80211_hw		hw __aligned(CACHE_LINE_SIZE);
};
#define	LHW_TO_HW(_lhw)		(&(_lhw)->hw)
#define	HW_TO_LHW(_hw)		container_of(_hw, struct lkpi_hw, hw)

/* Wrapper around a Linux channel context tracking driver registration. */
struct lkpi_chanctx {
	bool				added_to_drv;	/* Managed by MO */
	struct ieee80211_chanctx_conf	chanctx_conf __aligned(CACHE_LINE_SIZE);
};
#define	LCHANCTX_TO_CHANCTX_CONF(_lchanctx)		\
    (&(_lchanctx)->chanctx_conf)
#define	CHANCTX_CONF_TO_LCHANCTX(_conf)			\
    container_of(_conf, struct lkpi_chanctx, chanctx_conf)

struct lkpi_wiphy {
	const struct cfg80211_ops	*ops;

	/* Must be last! */
	struct wiphy			wiphy __aligned(CACHE_LINE_SIZE);
};
#define	WIPHY_TO_LWIPHY(_wiphy)	container_of(_wiphy, struct lkpi_wiphy, wiphy)
#define	LWIPHY_TO_WIPHY(_lwiphy)	(&(_lwiphy)->wiphy)

/*
 * Lock wrappers.  The lhw "sx" and "scan_mtx" locks are recursive;
 * lsta/ltxq mutexes are plain MTX_DEF.
 */
#define	LKPI_80211_LHW_LOCK_INIT(_lhw)			\
    sx_init_flags(&(_lhw)->sx, "lhw", SX_RECURSE);
#define	LKPI_80211_LHW_LOCK_DESTROY(_lhw)		\
    sx_destroy(&(_lhw)->sx);
#define	LKPI_80211_LHW_LOCK(_lhw)			\
    sx_xlock(&(_lhw)->sx)
#define	LKPI_80211_LHW_UNLOCK(_lhw)			\
    sx_xunlock(&(_lhw)->sx)
#define	LKPI_80211_LHW_LOCK_ASSERT(_lhw)		\
    sx_assert(&(_lhw)->sx, SA_LOCKED)
#define	LKPI_80211_LHW_UNLOCK_ASSERT(_lhw)		\
    sx_assert(&(_lhw)->sx, SA_UNLOCKED)

#define	LKPI_80211_LHW_SCAN_LOCK_INIT(_lhw)		\
    mtx_init(&(_lhw)->scan_mtx, "lhw-scan", NULL, MTX_DEF | MTX_RECURSE);
#define	LKPI_80211_LHW_SCAN_LOCK_DESTROY(_lhw)		\
    mtx_destroy(&(_lhw)->scan_mtx);
#define	LKPI_80211_LHW_SCAN_LOCK(_lhw)			\
    mtx_lock(&(_lhw)->scan_mtx)
#define	LKPI_80211_LHW_SCAN_UNLOCK(_lhw)		\
    mtx_unlock(&(_lhw)->scan_mtx)
#define	LKPI_80211_LHW_SCAN_LOCK_ASSERT(_lhw)		\
    mtx_assert(&(_lhw)->scan_mtx, MA_OWNED)
#define	LKPI_80211_LHW_SCAN_UNLOCK_ASSERT(_lhw)		\
    mtx_assert(&(_lhw)->scan_mtx, MA_NOTOWNED)

#define	LKPI_80211_LHW_TXQ_LOCK_INIT(_lhw)		\
    mtx_init(&(_lhw)->txq_mtx, "lhw-txq", NULL, MTX_DEF | MTX_RECURSE);
#define	LKPI_80211_LHW_TXQ_LOCK_DESTROY(_lhw)		\
    mtx_destroy(&(_lhw)->txq_mtx);
#define	LKPI_80211_LHW_TXQ_LOCK(_lhw)			\
    mtx_lock(&(_lhw)->txq_mtx)
#define	LKPI_80211_LHW_TXQ_UNLOCK(_lhw)			\
    mtx_unlock(&(_lhw)->txq_mtx)
#define	LKPI_80211_LHW_TXQ_LOCK_ASSERT(_lhw)		\
    mtx_assert(&(_lhw)->txq_mtx, MA_OWNED)
#define	LKPI_80211_LHW_TXQ_UNLOCK_ASSERT(_lhw)		\
    mtx_assert(&(_lhw)->txq_mtx, MA_NOTOWNED)

#define	LKPI_80211_LHW_RXQ_LOCK_INIT(_lhw)		\
    mtx_init(&(_lhw)->rxq_mtx, "lhw-rxq", NULL, MTX_DEF | MTX_RECURSE);
#define	LKPI_80211_LHW_RXQ_LOCK_DESTROY(_lhw)		\
    mtx_destroy(&(_lhw)->rxq_mtx);
#define	LKPI_80211_LHW_RXQ_LOCK(_lhw)			\
    mtx_lock(&(_lhw)->rxq_mtx)
#define	LKPI_80211_LHW_RXQ_UNLOCK(_lhw)			\
    mtx_unlock(&(_lhw)->rxq_mtx)
#define	LKPI_80211_LHW_RXQ_LOCK_ASSERT(_lhw)		\
    mtx_assert(&(_lhw)->rxq_mtx, MA_OWNED)
#define	LKPI_80211_LHW_RXQ_UNLOCK_ASSERT(_lhw)		\
    mtx_assert(&(_lhw)->rxq_mtx, MA_NOTOWNED)

#define	LKPI_80211_LHW_LVIF_LOCK(_lhw)	sx_xlock(&(_lhw)->lvif_sx)
#define	LKPI_80211_LHW_LVIF_UNLOCK(_lhw)	sx_xunlock(&(_lhw)->lvif_sx)

#define	LKPI_80211_LVIF_LOCK(_lvif)	mtx_lock(&(_lvif)->mtx)
#define	LKPI_80211_LVIF_UNLOCK(_lvif)	mtx_unlock(&(_lvif)->mtx)

#define	LKPI_80211_LSTA_TXQ_LOCK_INIT(_lsta)		\
    mtx_init(&(_lsta)->txq_mtx, "lsta-txq", NULL, MTX_DEF);
#define	LKPI_80211_LSTA_TXQ_LOCK_DESTROY(_lsta)		\
    mtx_destroy(&(_lsta)->txq_mtx);
#define	LKPI_80211_LSTA_TXQ_LOCK(_lsta)			\
    mtx_lock(&(_lsta)->txq_mtx)
#define	LKPI_80211_LSTA_TXQ_UNLOCK(_lsta)		\
    mtx_unlock(&(_lsta)->txq_mtx)
#define	LKPI_80211_LSTA_TXQ_LOCK_ASSERT(_lsta)		\
    mtx_assert(&(_lsta)->txq_mtx, MA_OWNED)
#define	LKPI_80211_LSTA_TXQ_UNLOCK_ASSERT(_lsta)	\
    mtx_assert(&(_lsta)->txq_mtx, MA_NOTOWNED)

#define	LKPI_80211_LTXQ_LOCK_INIT(_ltxq)		\
    mtx_init(&(_ltxq)->ltxq_mtx, "ltxq", NULL, MTX_DEF);
#define	LKPI_80211_LTXQ_LOCK_DESTROY(_ltxq)		\
    mtx_destroy(&(_ltxq)->ltxq_mtx);
#define	LKPI_80211_LTXQ_LOCK(_ltxq)			\
    mtx_lock(&(_ltxq)->ltxq_mtx)
#define	LKPI_80211_LTXQ_UNLOCK(_ltxq)			\
    mtx_unlock(&(_ltxq)->ltxq_mtx)
#define	LKPI_80211_LTXQ_LOCK_ASSERT(_ltxq)		\
    mtx_assert(&(_ltxq)->ltxq_mtx, MA_OWNED)
#define	LKPI_80211_LTXQ_UNLOCK_ASSERT(_ltxq)		\
    mtx_assert(&(_ltxq)->ltxq_mtx, MA_NOTOWNED)

/*
 * lkpi_80211_mo_* prototypes; NOTE(review): presumably "mo" = "method ops",
 * wrappers dispatching into the driver's struct ieee80211_ops (lhw->ops) --
 * confirm against the implementing .c file.
 */
int lkpi_80211_mo_start(struct ieee80211_hw *);
void lkpi_80211_mo_stop(struct ieee80211_hw *);
int lkpi_80211_mo_get_antenna(struct ieee80211_hw *, u32 *, u32 *);
int lkpi_80211_mo_set_frag_threshold(struct ieee80211_hw *, uint32_t);
int lkpi_80211_mo_set_rts_threshold(struct ieee80211_hw *, uint32_t);
int lkpi_80211_mo_add_interface(struct ieee80211_hw *, struct ieee80211_vif *);
void lkpi_80211_mo_remove_interface(struct ieee80211_hw *,
    struct ieee80211_vif *);
int lkpi_80211_mo_hw_scan(struct ieee80211_hw *, struct ieee80211_vif *,
    struct ieee80211_scan_request *);
void lkpi_80211_mo_cancel_hw_scan(struct ieee80211_hw *,
    struct ieee80211_vif *);
void lkpi_80211_mo_sw_scan_complete(struct ieee80211_hw *,
    struct ieee80211_vif *);
void lkpi_80211_mo_sw_scan_start(struct ieee80211_hw *,
    struct ieee80211_vif *, const u8 *);
u64 lkpi_80211_mo_prepare_multicast(struct ieee80211_hw *,
    struct netdev_hw_addr_list *);
void lkpi_80211_mo_configure_filter(struct ieee80211_hw *, unsigned int,
    unsigned int *, u64);
int lkpi_80211_mo_sta_state(struct ieee80211_hw *, struct ieee80211_vif *,
    struct lkpi_sta *, enum ieee80211_sta_state);
int lkpi_80211_mo_config(struct ieee80211_hw *, uint32_t);
int lkpi_80211_mo_assign_vif_chanctx(struct ieee80211_hw *,
    struct ieee80211_vif *, struct ieee80211_bss_conf *,
    struct ieee80211_chanctx_conf *);
void lkpi_80211_mo_unassign_vif_chanctx(struct ieee80211_hw *,
    struct ieee80211_vif *, struct ieee80211_bss_conf *,
    struct ieee80211_chanctx_conf **);
int lkpi_80211_mo_add_chanctx(struct ieee80211_hw *,
    struct ieee80211_chanctx_conf *);
void lkpi_80211_mo_change_chanctx(struct ieee80211_hw *,
    struct ieee80211_chanctx_conf *, uint32_t);
void lkpi_80211_mo_remove_chanctx(struct ieee80211_hw *,
    struct ieee80211_chanctx_conf *);
void lkpi_80211_mo_bss_info_changed(struct ieee80211_hw *,
    struct ieee80211_vif *, struct ieee80211_bss_conf *, uint64_t);
int lkpi_80211_mo_conf_tx(struct ieee80211_hw *, struct ieee80211_vif *,
    uint32_t, uint16_t, const struct ieee80211_tx_queue_params *);
void lkpi_80211_mo_flush(struct ieee80211_hw *, struct ieee80211_vif *,
    uint32_t, bool);
void lkpi_80211_mo_mgd_prepare_tx(struct ieee80211_hw *,
    struct ieee80211_vif *, struct ieee80211_prep_tx_info *);
void lkpi_80211_mo_mgd_complete_tx(struct ieee80211_hw *,
    struct ieee80211_vif *, struct ieee80211_prep_tx_info *);
void lkpi_80211_mo_tx(struct ieee80211_hw *, struct ieee80211_tx_control *,
    struct sk_buff *);
void lkpi_80211_mo_wake_tx_queue(struct ieee80211_hw *,
    struct ieee80211_txq *);
void lkpi_80211_mo_sync_rx_queues(struct ieee80211_hw *);
void lkpi_80211_mo_sta_pre_rcu_remove(struct ieee80211_hw *,
    struct ieee80211_vif *, struct ieee80211_sta *);
int lkpi_80211_mo_set_key(struct ieee80211_hw *, enum set_key_cmd,
    struct ieee80211_vif *, struct ieee80211_sta *,
    struct ieee80211_key_conf *);
int lkpi_80211_mo_ampdu_action(struct ieee80211_hw *, struct ieee80211_vif *,
    struct ieee80211_ampdu_params *);

#endif	/* _LKPI_SRC_LINUX_80211_H */